path (string, 9–117 chars) | type (2 classes) | project (10 classes) | commit_hash (string, 40 chars) | commit_message (string, 1–137 chars) | ground_truth (string, 0–2.74k chars) | main_code (string, 102–3.37k chars) | context (string, 0–14.7k chars)
---|---|---|---|---|---|---|---
dalle_mini.model.modeling/FlaxBartEncoderLayer.setup
|
Modified
|
borisdayma~dalle-mini
|
972bc8d2443eab9ccdae4c1a33fbd0ccb1083d7f
|
refactor(model): inherit from HF Flax & simplify
|
<6>:<add> bias=False,
<8>:<add> self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
<del> self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
<24>:<add> self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
<del> self.final_layer_norm = nn.LayerNorm(dtype=self.dtype)
|
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayer(FlaxBartEncoderLayer):
- class FlaxBartEncoderLayer(nn.Module):
def setup(self) -> None:
<0> self.embed_dim = self.config.d_model
<1> self.self_attn = FlaxBartAttention(
<2> config=self.config,
<3> embed_dim=self.embed_dim,
<4> num_heads=self.config.encoder_attention_heads,
<5> dropout=self.config.attention_dropout,
<6> dtype=self.dtype,
<7> )
<8> self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
<9> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<10> self.activation_fn = ACT2FN[self.config.activation_function]
<11> self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
<12> self.fc1 = nn.Dense(
<13> self.config.encoder_ffn_dim,
<14> dtype=self.dtype,
<15> use_bias=False,
<16> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<17> )
<18> self.fc2 = nn.Dense(
<19> self.embed_dim,
<20> dtype=self.dtype,
<21> use_bias=False,
<22> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<23> )
<24> self.final_layer_norm = nn.LayerNorm(dtype=self.dtype)
<25>
|
===========unchanged ref 0===========
at: math
sqrt(x: SupportsFloat, /) -> float
at: transformers.configuration_utils.PretrainedConfig.__init__
self.pad_token_id = kwargs.pop("pad_token_id", None)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.dropout = dropout
self.init_std = init_std
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
at: transformers.models.bart.modeling_flax_bart.FlaxBartDecoder
config: BartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayer(FlaxBartEncoderLayer):
- class FlaxBartEncoderLayer(nn.Module):
+ """
+ Edits:
+ - no bias
+ - use custom FlaxBartAttention
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoder(FlaxBartDecoder):
+ """
+ Edits:
+ - offset set to 0 (no padding token)
+ - use image_length + 1 (for BOS) instead of max_position_embeddings
+ - use custom FlaxBartDecoderLayerCollection
+ - embed_tokens cannot be None (issue at compile time)
+ """
+
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoder(FlaxBartEncoder):
+ """
+ Edits:
+ - offset set to 0 (no padding token)
+ - use max_text_length instead of max_position_embeddings
+ - use custom FlaxBartEncoderLayerCollection
+ - embed_tokens cannot be None (issue at compile time)
+ """
+
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
- def _merge_heads(self, hidden_states):
- return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
-
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
- def _split_heads(self, hidden_states):
- return hidden_states.reshape(
- hidden_states.shape[:2] + (self.num_heads, self.head_dim)
- )
-
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoder(FlaxBartEncoder):
+ def setup(self):
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+
+ embed_dim = self.config.d_model
+ self.padding_idx = self.config.pad_token_id
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
+
+ # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
+ # and adjust num_embeddings appropriately. Other models don't have this hack
+ self.offset = 0
+ self.embed_positions = nn.Embed(
+ self.config.max_text_length + self.offset,
+ embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
- config: DalleBartConfig
- embed_dim: int
- num_heads: int
- dropout: float = 0.0
- causal: bool = False
- bias: bool = True
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ """
+ Edits:
+ - causal mask considers embed_dim instead of max_position_embeddings
+ """
===========changed ref 7===========
# module: dalle_mini.model.modeling
- def shift_tokens_right(
- input_ids: np.array, pad_token_id: int, decoder_start_token_id: int
- ) -> np.ndarray:
- """
- Shift input ids one token to the right.
- """
- shifted_input_ids = np.zeros_like(input_ids)
- shifted_input_ids[:, 1:] = input_ids[:, :-1]
- shifted_input_ids[:, 0] = decoder_start_token_id
-
- shifted_input_ids = np.where(
- shifted_input_ids == -100, pad_token_id, shifted_input_ids
- )
- return shifted_input_ids
-
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
- assert (
+ if self.head_dim * self.num_heads != self.embed_dim:
- self.head_dim * self.num_heads == self.embed_dim
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
- ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
+ f" and `num_heads`: {self.num_heads})."
+ )
dense = partial(
nn.Dense,
self.embed_dim,
+ use_bias=self.bias,
- use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.causal:
self.causal_mask = make_causal_mask(
jnp.ones((1, self.embed_dim), dtype="bool"), dtype="bool"
)
|
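The pattern used throughout this refactor — a subclass that keeps the parent Flax module's `__call__` and only overrides `setup` to drop biases and pin the LayerNorm epsilon — can be shown with a minimal standalone sketch. The `BaseLayer`/`NoBiasLayer` modules below are hypothetical stand-ins, not the repo's classes:

import jax
import jax.numpy as jnp
import flax.linen as nn


class BaseLayer(nn.Module):
    features: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.dense = nn.Dense(self.features, dtype=self.dtype)  # bias on by default
        self.norm = nn.LayerNorm(dtype=self.dtype)

    def __call__(self, x):
        return self.norm(self.dense(x))


class NoBiasLayer(BaseLayer):
    # same __call__ as the parent, different setup: no bias, explicit epsilon
    def setup(self):
        self.dense = nn.Dense(
            self.features,
            dtype=self.dtype,
            use_bias=False,
            kernel_init=jax.nn.initializers.normal(0.02),
        )
        self.norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)


params = NoBiasLayer(features=8).init(jax.random.PRNGKey(0), jnp.ones((1, 4, 8)))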
dalle_mini.model.modeling/FlaxBartEncoderLayerCollection.setup
|
Modified
|
borisdayma~dalle-mini
|
972bc8d2443eab9ccdae4c1a33fbd0ccb1083d7f
|
refactor(model): inherit from HF Flax & simplify
|
<9>:<add> self.layerdrop = self.config.encoder_layerdrop
|
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- class FlaxBartEncoderLayerCollection(nn.Module):
def setup(self):
<0> layer_module = (
<1> nn.remat(FlaxBartEncoderLayer)
<2> if self.config.gradient_checkpointing
<3> else FlaxBartEncoderLayer
<4> )
<5> self.layers = [
<6> layer_module(self.config, name=str(i), dtype=self.dtype)
<7> for i in range(self.config.encoder_layers)
<8> ]
<9>
|
===========unchanged ref 0===========
at: transformers.modeling_flax_utils.FlaxPreTrainedModel.__init__
self.params = random_params
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
+ """
+ Edits:
+ - added num_params property
+ """
+
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class FlaxBartModule(FlaxBartModule):
+ """
+ Edits
+ - use custom FlaxBartEncoder & FlaxBartDecoder
+ - use separate embeddings for Encoder & Decoder
+ """
+
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- class FlaxBartEncoderLayerCollection(nn.Module):
+ """
+ Edits:
+ - use custom FlaxBartEncoderLayer
+ - allow Gradient Checkpointing (nn.remat)
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayer(FlaxBartEncoderLayer):
- class FlaxBartEncoderLayer(nn.Module):
+ """
+ Edits:
+ - no bias
+ - use custom FlaxBartAttention
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoder(FlaxBartDecoder):
+ """
+ Edits:
+ - offset set to 0 (no padding token)
+ - use image_length + 1 (for BOS) instead of max_position_embeddings
+ - use custom FlaxBartDecoderLayerCollection
+ - embed_tokens cannot be None (issue at compile time)
+ """
+
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoder(FlaxBartEncoder):
+ """
+ Edits:
+ - offset set to 0 (no padding token)
+ - use max_text_length instead of max_position_embeddings
+ - use custom FlaxBartEncoderLayerCollection
+ - embed_tokens cannot be None (issue at compile time)
+ """
+
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class FlaxBartModule(FlaxBartModule):
+ def setup(self):
+ encoder_embed_tokens = nn.Embed(
+ self.config.encoder_vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ decoder_embed_tokens = nn.Embed(
+ self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.encoder = FlaxBartEncoder(
+ self.config, dtype=self.dtype, embed_tokens=encoder_embed_tokens
+ )
+ self.decoder = FlaxBartDecoder(
+ self.config, dtype=self.dtype, embed_tokens=decoder_embed_tokens
+ )
+
===========changed ref 7===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
- def _merge_heads(self, hidden_states):
- return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
-
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
- def _split_heads(self, hidden_states):
- return hidden_states.reshape(
- hidden_states.shape[:2] + (self.num_heads, self.head_dim)
- )
-
===========changed ref 9===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayer(FlaxBartEncoderLayer):
- class FlaxBartEncoderLayer(nn.Module):
- def __call__(
- self,
- hidden_states: jnp.ndarray,
- attention_mask: jnp.ndarray,
- deterministic: bool = True,
- ) -> Tuple[jnp.ndarray]:
- residual = hidden_states
- hidden_states = self.self_attn(
- hidden_states=hidden_states, attention_mask=attention_mask
- )
-
- hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
- hidden_states = residual + hidden_states
- hidden_states = self.self_attn_layer_norm(hidden_states)
-
- residual = hidden_states
- hidden_states = self.activation_fn(self.fc1(hidden_states))
- hidden_states = self.activation_dropout_layer(
- hidden_states, deterministic=deterministic
- )
- hidden_states = self.fc2(hidden_states)
- hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
- hidden_states = residual + hidden_states
- hidden_states = self.final_layer_norm(hidden_states)
-
- return hidden_states
-
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoder(FlaxBartDecoder):
+ def setup(self):
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+
+ embed_dim = self.config.d_model
+ self.padding_idx = self.config.pad_token_id
+ self.embed_scale = (
+ math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
+ )
+
+ # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
+ # and adjust num_embeddings appropriately. Other models don't have this hack
+ self.offset = 0
+ self.embed_positions = nn.Embed(
+ self.config.image_length + 1 + self.offset, # image length + 1 for BOS
+ embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
|
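A side note on the `nn.remat` branch above: gradient checkpointing in Flax is enabled by wrapping the layer class before it is instantiated, so the collection builds rematerialized layers transparently. A minimal sketch under assumed names (`Block` and `Stack` are hypothetical modules, not the repo's):

import jax
import jax.numpy as jnp
import flax.linen as nn


class Block(nn.Module):
    features: int

    @nn.compact
    def __call__(self, x):
        return nn.relu(nn.Dense(self.features)(x))


class Stack(nn.Module):
    features: int
    n_layers: int
    gradient_checkpointing: bool = False

    def setup(self):
        # wrap the class itself; instances are then created exactly as before
        layer_cls = nn.remat(Block) if self.gradient_checkpointing else Block
        self.layers = [
            layer_cls(self.features, name=str(i)) for i in range(self.n_layers)
        ]

    def __call__(self, x):
        for layer in self.layers:
            x = layer(x)
        return x


x = jnp.ones((2, 16))
params = Stack(features=16, n_layers=4, gradient_checkpointing=True).init(
    jax.random.PRNGKey(0), x
)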
dalle_mini.model.modeling/FlaxBartDecoderLayer.setup
|
Modified
|
borisdayma~dalle-mini
|
972bc8d2443eab9ccdae4c1a33fbd0ccb1083d7f
|
refactor(model): inherit from HF Flax & simplify
|
<7>:<add> bias=False,
<13>:<add> self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
<del> self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
<19>:<add> bias=False,
<21>:<add> self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
<del> self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
<34>:<add> self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
<del> self.final
|
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
- class FlaxBartDecoderLayer(nn.Module):
def setup(self) -> None:
<0> self.embed_dim = self.config.d_model
<1> self.self_attn = FlaxBartAttention(
<2> config=self.config,
<3> embed_dim=self.embed_dim,
<4> num_heads=self.config.decoder_attention_heads,
<5> dropout=self.config.attention_dropout,
<6> causal=True,
<7> dtype=self.dtype,
<8> )
<9> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<10> self.activation_fn = ACT2FN[self.config.activation_function]
<11> self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
<12>
<13> self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
<14> self.encoder_attn = FlaxBartAttention(
<15> config=self.config,
<16> embed_dim=self.embed_dim,
<17> num_heads=self.config.decoder_attention_heads,
<18> dropout=self.config.attention_dropout,
<19> dtype=self.dtype,
<20> )
<21> self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
<22> self.fc1 = nn.Dense(
<23> self.config.encoder_ffn_dim,
<24> dtype=self.dtype,
<25> use_bias=False,
<26> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<27> )
<28> self.fc2 = nn.Dense(
<29> self.embed_dim,
<30> dtype=self.dtype,
<31> use_bias=False,
<32> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<33> )
<34> self.final</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
- class FlaxBartDecoderLayer(nn.Module):
def setup(self) -> None:
# offset: 1
===========unchanged ref 0===========
at: dalle_mini.model.modeling.FlaxBartForConditionalGenerationModule.setup
self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
self.lm_head = nn.Dense(
self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
at: transformers.modeling_flax_outputs
FlaxSeq2SeqLMOutput(**kwargs: _VT)
FlaxSeq2SeqLMOutput(map: Mapping[_KT, _VT], **kwargs: _VT)
FlaxSeq2SeqLMOutput(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
at: transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule
config: BartConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
+ """
+ Edits:
+ - added num_params property
+ """
+
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
- class FlaxBartDecoderLayer(nn.Module):
+ """
+ Edits:
+ - no bias
+ - uses custom FlaxBartAttention
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ class FlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule):
+ """
+ Edits:
+ - no bias
+ - lm_head set to image_vocab_size + 1 (for BOS)
+ - uses custom FlaxBartModule
+ """
+
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- class FlaxBartEncoderLayerCollection(nn.Module):
- def __call__(
- self,
- hidden_states,
- attention_mask,
- deterministic: bool = True,
- ):
- for encoder_layer in self.layers:
- hidden_states = encoder_layer(
- hidden_states,
- attention_mask,
- deterministic,
- )
-
- return FlaxBaseModelOutput(last_hidden_state=hidden_states)
-
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
+ @property
+ def num_params(self):
+ num_params = jax.tree_map(
+ lambda param: param.size, flatten_dict(unfreeze(self.params))
+ ).values()
+ return sum(list(num_params))
+
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ class FlaxBartModule(FlaxBartModule):
+ """
+ Edits
+ - use custom FlaxBartEncoder & FlaxBartDecoder
+ - use separate embeddings for Encoder & Decoder
+ """
+
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- class FlaxBartEncoderLayerCollection(nn.Module):
+ """
+ Edits:
+ - use custom FlaxBartEncoderLayer
+ - allow Gradient Checkpointing (nn.remat)
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========changed ref 7===========
# module: dalle_mini.model.modeling
+ class FlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule):
+ def setup(self):
+ self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
+ self.lm_head = nn.Dense(
+ self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
+ use_bias=False,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- class FlaxBartEncoderLayerCollection(nn.Module):
def setup(self):
layer_module = (
nn.remat(FlaxBartEncoderLayer)
if self.config.gradient_checkpointing
else FlaxBartEncoderLayer
)
self.layers = [
layer_module(self.config, name=str(i), dtype=self.dtype)
for i in range(self.config.encoder_layers)
]
+ self.layerdrop = self.config.encoder_layerdrop
===========changed ref 9===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayer(FlaxBartEncoderLayer):
- class FlaxBartEncoderLayer(nn.Module):
+ """
+ Edits:
+ - no bias
+ - use custom FlaxBartAttention
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoder(FlaxBartDecoder):
+ """
+ Edits:
+ - offset set to 0 (no padding token)
+ - use image_length + 1 (for BOS) instead of max_position_embeddings
+ - use custom FlaxBartDecoderLayerCollection
+ - embed_tokens cannot be None (issue at compile time)
+ """
+
===========changed ref 11===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoder(FlaxBartEncoder):
+ """
+ Edits:
+ - offset set to 0 (no padding token)
+ - use max_text_length instead of max_position_embeddings
+ - use custom FlaxBartEncoderLayerCollection
+ - embed_tokens cannot be None (issue at compile time)
+ """
+
|
dalle_mini.model.modeling/FlaxBartDecoderLayerCollection.setup
|
Modified
|
borisdayma~dalle-mini
|
972bc8d2443eab9ccdae4c1a33fbd0ccb1083d7f
|
refactor(model): inherit from HF Flax & simplify
|
<9>:<add> self.layerdrop = self.config.decoder_layerdrop
|
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
- class FlaxBartDecoderLayerCollection(nn.Module):
def setup(self):
<0> layer_module = (
<1> nn.remat(FlaxBartDecoderLayer)
<2> if self.config.gradient_checkpointing
<3> else FlaxBartDecoderLayer
<4> )
<5> self.layers = [
<6> layer_module(self.config, name=str(i), dtype=self.dtype)
<7> for i in range(self.config.decoder_layers)
<8> ]
<9>
|
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
- class FlaxBartDecoderLayerCollection(nn.Module):
+ """
+ Edits:
+ - use custom FlaxBartDecoderLayer
+ - allow Gradient Checkpointing (nn.remat)
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class DalleBart(FlaxBartPreTrainedModel, FlaxBartForConditionalGeneration):
+ """
+ Edits:
+ - renamed from FlaxBartForConditionalGeneration
+ - uses custom FlaxBartPreTrainedModel
+ - uses custom FlaxBartForConditionalGenerationModule
+ - no bias in decode method
+ """
+
+ module_class = FlaxBartForConditionalGenerationModule
+
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
+ """
+ Edits:
+ - added num_params property
+ """
+
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
- class FlaxBartDecoderLayer(nn.Module):
+ """
+ Edits:
+ - no bias
+ - uses custom FlaxBartAttention
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule):
+ """
+ Edits:
+ - no bias
+ - lm_head set to image_vocab_size + 1 (for BOS)
+ - uses custom FlaxBartModule
+ """
+
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- class FlaxBartEncoderLayerCollection(nn.Module):
- def __call__(
- self,
- hidden_states,
- attention_mask,
- deterministic: bool = True,
- ):
- for encoder_layer in self.layers:
- hidden_states = encoder_layer(
- hidden_states,
- attention_mask,
- deterministic,
- )
-
- return FlaxBaseModelOutput(last_hidden_state=hidden_states)
-
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
+ @property
+ def num_params(self):
+ num_params = jax.tree_map(
+ lambda param: param.size, flatten_dict(unfreeze(self.params))
+ ).values()
+ return sum(list(num_params))
+
===========changed ref 7===========
# module: dalle_mini.model.modeling
+ class FlaxBartModule(FlaxBartModule):
+ """
+ Edits
+ - use custom FlaxBartEncoder & FlaxBartDecoder
+ - use separate embeddings for Encoder & Decoder
+ """
+
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- class FlaxBartEncoderLayerCollection(nn.Module):
+ """
+ Edits:
+ - use custom FlaxBartEncoderLayer
+ - allow Gradient Checkpointing (nn.remat)
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========changed ref 9===========
# module: dalle_mini.model.modeling
+ class FlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule):
+ def setup(self):
+ self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
+ self.lm_head = nn.Dense(
+ self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
+ use_bias=False,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- class FlaxBartEncoderLayerCollection(nn.Module):
def setup(self):
layer_module = (
nn.remat(FlaxBartEncoderLayer)
if self.config.gradient_checkpointing
else FlaxBartEncoderLayer
)
self.layers = [
layer_module(self.config, name=str(i), dtype=self.dtype)
for i in range(self.config.encoder_layers)
]
+ self.layerdrop = self.config.encoder_layerdrop
===========changed ref 11===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayer(FlaxBartEncoderLayer):
- class FlaxBartEncoderLayer(nn.Module):
+ """
+ Edits:
+ - no bias
+ - use custom FlaxBartAttention
+ """
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32
===========changed ref 12===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoder(FlaxBartDecoder):
+ """
+ Edits:
+ - offset set to 0 (no padding token)
+ - use image_length + 1 (for BOS) instead of max_position_embeddings
+ - use custom FlaxBartDecoderLayerCollection
+ - embed_tokens cannot be None (issue at compile time)
+ """
+
===========changed ref 13===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoder(FlaxBartEncoder):
+ """
+ Edits:
+ - offset set to 0 (no padding token)
+ - use max_text_length instead of max_position_embeddings
+ - use custom FlaxBartEncoderLayerCollection
+ - embed_tokens cannot be None (issue at compile time)
+ """
+
===========changed ref 14===========
# module: dalle_mini.model.modeling
+ class FlaxBartModule(FlaxBartModule):
+ def setup(self):
+ encoder_embed_tokens = nn.Embed(
+ self.config.encoder_vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ decoder_embed_tokens = nn.Embed(
+ self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.encoder = FlaxBartEncoder(
+ self.config, dtype=self.dtype, embed_tokens=encoder_embed_tokens
+ )
+ self.decoder = FlaxBartDecoder(
+ self.config, dtype=self.dtype, embed_tokens=decoder_embed_tokens
+ )
+
===========changed ref 15===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
- def _merge_heads(self, hidden_states):
- return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
-
|
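The `num_params` property shown in the context above boils down to flattening the params pytree and summing array sizes; the same idea works on any Flax params dict. A standalone sketch (`count_params` is a hypothetical helper, not part of the repo):

import jax
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict


def count_params(params):
    # flatten the (possibly frozen) nested params dict, then sum element counts
    sizes = jax.tree_map(lambda p: p.size, flatten_dict(unfreeze(params))).values()
    return sum(sizes)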
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
972bc8d2443eab9ccdae4c1a33fbd0ccb1083d7f
|
refactor(model): inherit from HF Flax & simplify
|
<26>:<del> self.decoder_start_token_id = image_vocab_size # BOS appended to vocab
|
<s>.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
- num_labels=3,
is_encoder_decoder=True,
forced_eos_token_id=None,
+ tie_word_embeddings=False, # different modalities and sizes
- tie_word_embeddings=False, # don't tie for scaling reasons and due to different modalities and sizes
**kwargs,
):
<0> self.normalize_text = normalize_text
<1> self.encoder_vocab_size = encoder_vocab_size
<2> self.image_vocab_size = image_vocab_size
<3> self.image_length = image_length
<4> self.max_text_length = max_text_length
<5> self.d_model = d_model
<6> self.encoder_ffn_dim = encoder_ffn_dim
<7> self.encoder_layers = encoder_layers
<8> self.encoder_attention_heads = encoder_attention_heads
<9> self.decoder_ffn_dim = decoder_ffn_dim
<10> self.decoder_layers = decoder_layers
<11> self.decoder_attention_heads = decoder_attention_heads
<12> self.dropout = dropout
<13> self.attention_dropout = attention_dropout
<14> self.activation_dropout = activation_dropout
<15> self.activation_function = activation_function
<16> self.init_std = init_std
<17> self.encoder_layerdrop = encoder_layerdrop
<18> self.decoder_layerdrop = decoder_layerdrop
<19> self.classifier_dropout = classifier_dropout
<20> self.use_cache = use_cache
<21> self.num_hidden_layers = encoder_layers
<22> self.gradient_checkpointing = gradient_checkpointing
<23> self.scale_embedding = (
<24> scale_embedding # scale factor will be sqrt(d_model) if True
<25> )
<26> self.decoder_start_token_id = image_vocab_size # BOS appended to vocab
<27> self.min_length = image_length</s>
|
===========below chunk 0===========
<s> classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
- num_labels=3,
is_encoder_decoder=True,
forced_eos_token_id=None,
+ tie_word_embeddings=False, # different modalities and sizes
- tie_word_embeddings=False, # don't tie for scaling reasons and due to different modalities and sizes
**kwargs,
):
# offset: 1
self.max_length = image_length + 1
# remove keys we are about to set to prevent errors
for k in [
"bos_token_id",
"eos_token_id",
"pad_token_id",
"decoder_start_token_id",
"forced_eos_token_id",
]:
kwargs.pop(k, None)
super().__init__(
num_labels=num_labels,
pad_token_id=image_vocab_size
+ 1, # needed to avoid errors during generation (converted to jnp.array)
bos_token_id=image_vocab_size + 1, # set to unreachable values
eos_token_id=image_vocab_size + 1,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=self.decoder_start_token_id,
forced_eos_token_id=forced_eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."</s>
===========below chunk 1===========
<s> classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
- num_labels=3,
is_encoder_decoder=True,
forced_eos_token_id=None,
+ tie_word_embeddings=False, # different modalities and sizes
- tie_word_embeddings=False, # don't tie for scaling reasons and due to different modalities and sizes
**kwargs,
):
# offset: 2
<s>"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
===========changed ref 0===========
# module: dalle_mini.model.modeling
- class DalleBartModule(nn.Module):
- def _get_decoder_module(self):
- return self.decoder
-
===========changed ref 1===========
# module: dalle_mini.model.modeling
- class DalleBartModule(nn.Module):
- def _get_encoder_module(self):
- return self.encoder
-
===========changed ref 2===========
# module: dalle_mini.model.modeling
- class DalleBartForConditionalGenerationModule(nn.Module):
- def _get_decoder_module(self):
- return self.model.decoder
-
===========changed ref 3===========
# module: dalle_mini.model.modeling
- class DalleBartForConditionalGenerationModule(nn.Module):
- def _get_encoder_module(self):
- return self.model.encoder
-
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
+ """
+ Edits:
+ - added num_params property
+ """
+
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
- def _merge_heads(self, hidden_states):
- return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
-
===========changed ref 6===========
# module: dalle_mini.model.modeling
- class DalleBartForConditionalGeneration(DalleBartPreTrainedModel):
- module_class = DalleBartForConditionalGenerationModule
- dtype: jnp.dtype = jnp.float32
-
===========changed ref 7===========
# module: dalle_mini.model.modeling
- class DalleBartModule(nn.Module):
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
-
===========changed ref 8===========
# module: dalle_mini.model.modeling
- class DalleBartDecoder(nn.Module):
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
-
===========changed ref 9===========
# module: dalle_mini.model.modeling
- class DalleBartEncoder(nn.Module):
- config: DalleBartConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
-
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ class FlaxBartModule(FlaxBartModule):
+ """
+ Edits
+ - use custom FlaxBartEncoder & FlaxBartDecoder
+ - use separate embeddings for Encoder & Decoder
+ """
+
===========changed ref 11===========
# module: dalle_mini.model.modeling
+ class FlaxBartAttention(FlaxBartAttention):
- class FlaxBartAttention(nn.Module):
- def _split_heads(self, hidden_states):
- return hidden_states.reshape(
- hidden_states.shape[:2] + (self.num_heads, self.head_dim)
- )
-
===========changed ref 12===========
# module: dalle_mini.model.modeling
- class DalleBartPreTrainedModel(FlaxPreTrainedModel):
- config_class = DalleBartConfig
- base_model_prefix: str = "dallebart"
- module_class: nn.Module = None
-
===========changed ref 13===========
# module: dalle_mini.model.modeling
- class DalleBartModule(nn.Module):
- def setup(self):
- self.encoder = DalleBartEncoder(self.config, dtype=self.dtype)
- self.decoder = DalleBartDecoder(self.config, dtype=self.dtype)
-
|
dalle_mini.model.modeling/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
8654dc998902b1979389bd7275caaa9927f27c16
|
fix: causal_mask based on image tokens
|
<21>:<add> # used only in decoder
<22>:<add> jnp.ones((1, self.config.image_length + 1), dtype="bool"), dtype="bool"
<del> jnp.ones((1, self.embed_dim), dtype="bool"), dtype="bool"
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> if self.head_dim * self.num_heads != self.embed_dim:
<2> raise ValueError(
<3> f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
<4> f" and `num_heads`: {self.num_heads})."
<5> )
<6>
<7> dense = partial(
<8> nn.Dense,
<9> self.embed_dim,
<10> use_bias=self.bias,
<11> dtype=self.dtype,
<12> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<13> )
<14>
<15> self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
<16> self.out_proj = dense()
<17>
<18> self.dropout_layer = nn.Dropout(rate=self.dropout)
<19>
<20> if self.causal:
<21> self.causal_mask = make_causal_mask(
<22> jnp.ones((1, self.embed_dim), dtype="bool"), dtype="bool"
<23> )
<24>
|
===========unchanged ref 0===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self) -> None
|
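The fix above matters because `make_causal_mask` derives the mask size from the sequence length of its input: it has to be built from the maximum number of decoder positions (`image_length + 1` for the BOS token), not from `embed_dim`. A small standalone check (the value of `image_length` is just an example, the real one comes from the model config):

import jax.numpy as jnp
from flax.linen import make_causal_mask

image_length = 256  # example value
causal_mask = make_causal_mask(
    jnp.ones((1, image_length + 1), dtype="bool"), dtype="bool"
)
print(causal_mask.shape)  # (1, 1, 257, 257): lower-triangular mask over decoder positions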
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
eb24dbcf65d07edc8890f4c4194d24953b79cdf9
|
fix(config): set min/max for generation
|
<26>:<del> self.min_length = image_length + 1
<27>:<del> self.max_length = image_length + 1
|
<s>1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
**kwargs,
):
<0> self.normalize_text = normalize_text
<1> self.encoder_vocab_size = encoder_vocab_size
<2> self.image_vocab_size = image_vocab_size
<3> self.image_length = image_length
<4> self.max_text_length = max_text_length
<5> self.d_model = d_model
<6> self.encoder_ffn_dim = encoder_ffn_dim
<7> self.encoder_layers = encoder_layers
<8> self.encoder_attention_heads = encoder_attention_heads
<9> self.decoder_ffn_dim = decoder_ffn_dim
<10> self.decoder_layers = decoder_layers
<11> self.decoder_attention_heads = decoder_attention_heads
<12> self.dropout = dropout
<13> self.attention_dropout = attention_dropout
<14> self.activation_dropout = activation_dropout
<15> self.activation_function = activation_function
<16> self.init_std = init_std
<17> self.encoder_layerdrop = encoder_layerdrop
<18> self.decoder_layerdrop = decoder_layerdrop
<19> self.classifier_dropout = classifier_dropout
<20> self.use_cache = use_cache
<21> self.num_hidden_layers = encoder_layers
<22> self.gradient_checkpointing = gradient_checkpointing
<23> self.scale_embedding = (
<24> scale_embedding # scale factor will be sqrt(d_model) if True
<25> )
<26> self.min_length = image_length + 1
<27> self.max_length = image_length + 1
<28>
<29> # remove inferred keys</s>
|
===========below chunk 0===========
<s> dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
**kwargs,
):
# offset: 1
for k in [
"pad_token_id",
"bos_token_id",
"eos_token_id",
"decoder_start_token_id",
]:
kwargs.pop(k, None)
super().__init__(
pad_token_id=image_vocab_size
+ 1, # needed to avoid errors during generation (converted to jnp.array)
bos_token_id=image_vocab_size + 1, # set to unreachable values
eos_token_id=image_vocab_size + 1,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=image_vocab_size, # BOS appended to vocab
forced_eos_token_id=forced_eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
53dade7fd869d7f66821a7179e9f508655b9afd0
|
feat: minor improvements
|
<21>:<del> self.num_hidden_layers = encoder_layers
|
<s>1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
**kwargs,
):
<0> self.normalize_text = normalize_text
<1> self.encoder_vocab_size = encoder_vocab_size
<2> self.image_vocab_size = image_vocab_size
<3> self.image_length = image_length
<4> self.max_text_length = max_text_length
<5> self.d_model = d_model
<6> self.encoder_ffn_dim = encoder_ffn_dim
<7> self.encoder_layers = encoder_layers
<8> self.encoder_attention_heads = encoder_attention_heads
<9> self.decoder_ffn_dim = decoder_ffn_dim
<10> self.decoder_layers = decoder_layers
<11> self.decoder_attention_heads = decoder_attention_heads
<12> self.dropout = dropout
<13> self.attention_dropout = attention_dropout
<14> self.activation_dropout = activation_dropout
<15> self.activation_function = activation_function
<16> self.init_std = init_std
<17> self.encoder_layerdrop = encoder_layerdrop
<18> self.decoder_layerdrop = decoder_layerdrop
<19> self.classifier_dropout = classifier_dropout
<20> self.use_cache = use_cache
<21> self.num_hidden_layers = encoder_layers
<22> self.gradient_checkpointing = gradient_checkpointing
<23> self.scale_embedding = (
<24> scale_embedding # scale factor will be sqrt(d_model) if True
<25> )
<26>
<27> # remove inferred keys to prevent errors when loading config (passed as kwargs)
<28> for k in [
<29> "pad_token_id",</s>
|
===========below chunk 0===========
<s> dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
**kwargs,
):
# offset: 1
"eos_token_id",
"decoder_start_token_id",
"min_length",
"max_length",
]:
kwargs.pop(k, None)
super().__init__(
pad_token_id=image_vocab_size
+ 1, # needed to avoid errors during generation (converted to jnp.array)
bos_token_id=image_vocab_size + 1, # set to unreachable values
eos_token_id=image_vocab_size + 1,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=image_vocab_size, # BOS appended to vocab
forced_eos_token_id=forced_eos_token_id,
tie_word_embeddings=tie_word_embeddings,
min_length=image_length + 1,
max_length=image_length + 1,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
|
dalle_mini.data/Dataset.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
5ee6e60837d1ff5d9f822a15ef7a9f5c646bd80c
|
feat(data): accept braceexpand notation
|
<2>:<add> # accept braceexpand notation
<add> for k in ["train_file", "validation_file"]:
<add> f = getattr(self, k)
<add> if isinstance(f, str):
<add> setattr(self, k, list(braceexpand(f)))
|
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
<0> # define data_files
<1> if self.train_file is not None or self.validation_file is not None:
<2> data_files = {
<3> "train": self.train_file,
<4> "validation": self.validation_file,
<5> }
<6> else:
<7> data_files = None
<8>
<9> # load dataset
<10> dataset = load_dataset(
<11> self.dataset_repo_or_path,
<12> data_files=data_files,
<13> streaming=self.streaming,
<14> use_auth_token=self.use_auth_token,
<15> )
<16> if self.do_train:
<17> if "train" not in dataset:
<18> raise ValueError("Training requires a training dataset")
<19> self.train_dataset = dataset["train"]
<20> if self.max_train_samples is not None:
<21> self.train_dataset = (
<22> self.train_dataset.take(self.max_train_samples)
<23> if self.streaming
<24> else self.train_dataset.select(range(self.max_train_samples))
<25> )
<26> if self.do_eval:
<27> if "validation" not in dataset:
<28> raise ValueError("Evaluating requires a validation dataset")
<29> self.eval_dataset = dataset["validation"]
<30> if self.max_eval_samples is not None:
<31> self.eval_dataset = (
<32> self.eval_dataset.take(self.max_eval_samples)
<33> if self.streaming
<34> else self.eval_dataset.select(range(self.max_eval_samples))
<35> )
<36>
|
===========unchanged ref 0===========
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
at: datasets.arrow_dataset.Dataset
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 1===========
at: datasets.load
load_dataset(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, split: Optional[Union[str, Split]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[DownloadMode]=None, ignore_verifications: bool=False, keep_in_memory: Optional[bool]=None, save_infos: bool=False, revision: Optional[Union[str, Version]]=None, use_auth_token: Optional[Union[bool, str]]=None, task: Optional[Union[str, TaskTemplate]]=None, streaming: bool=False, num_proc: Optional[int]=None, *, num_process: int=1, process_id: int=0, seed: Optional[int]=None, experiment_id: Optional[str]=None, max_concurrent_cache_files: int=10000, timeout: Union[int, float]=100, base_path: Optional[str]=None, info: Optional[DatasetInfo]=None, repo_id: Optional[str]=None, **kwargs) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]
|
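For reference, the braceexpand notation accepted above turns a single shard pattern into an explicit file list; the file names below are made up for illustration:

from braceexpand import braceexpand

train_file = "data/shard-{0000..0003}-of-0004.jsonl"
print(list(braceexpand(train_file)))
# ['data/shard-0000-of-0004.jsonl', 'data/shard-0001-of-0004.jsonl',
#  'data/shard-0002-of-0004.jsonl', 'data/shard-0003-of-0004.jsonl']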
dalle_mini.data/Dataset.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
ed93c8ab114018f3fb7b870b60e50ca4cccfcd1a
|
feat: split shards by host
|
<0>:<add> self.multi_hosts = jax.process_count > 1
<7>:<add> # for list of files, split training data shards by host
<add> if isinstance(self.train_file, list) and self.multi_hosts:
<add> self.train_file = self.train_file[
<add> jax.process_index() :: jax.process_count()
<add> ]
|
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
<0> # define data_files
<1> if self.train_file is not None or self.validation_file is not None:
<2> # accept braceexpand notation
<3> for k in ["train_file", "validation_file"]:
<4> f = getattr(self, k)
<5> if isinstance(f, str):
<6> setattr(self, k, list(braceexpand(f)))
<7> data_files = {
<8> "train": self.train_file,
<9> "validation": self.validation_file,
<10> }
<11> else:
<12> data_files = None
<13>
<14> # load dataset
<15> dataset = load_dataset(
<16> self.dataset_repo_or_path,
<17> data_files=data_files,
<18> streaming=self.streaming,
<19> use_auth_token=self.use_auth_token,
<20> )
<21> if self.do_train:
<22> if "train" not in dataset:
<23> raise ValueError("Training requires a training dataset")
<24> self.train_dataset = dataset["train"]
<25> if self.max_train_samples is not None:
<26> self.train_dataset = (
<27> self.train_dataset.take(self.max_train_samples)
<28> if self.streaming
<29> else self.train_dataset.select(range(self.max_train_samples))
<30> )
<31> if self.do_eval:
<32> if "validation" not in dataset:
<33> raise ValueError("Evaluating requires a validation dataset")
<34> self.eval_dataset = dataset["validation"]
<35> if self.max_eval_samples is not None:
<36> self.eval_dataset = (
<37> self.eval_dataset.take(self.max_eval_samples)
<38> if self.streaming
<39> else self.eval_dataset.select(range(self.max_eval_samples))
<40> )
<41>
|
===========unchanged ref 0===========
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
at: datasets.arrow_dataset.Dataset
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 1===========
at: datasets.load
load_dataset(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, split: Optional[Union[str, Split]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[DownloadMode]=None, ignore_verifications: bool=False, keep_in_memory: Optional[bool]=None, save_infos: bool=False, revision: Optional[Union[str, Version]]=None, use_auth_token: Optional[Union[bool, str]]=None, task: Optional[Union[str, TaskTemplate]]=None, streaming: bool=False, num_proc: Optional[int]=None, *, base_path: Optional[str]=None, info: Optional[DatasetInfo]=None, repo_id: Optional[str]=None, num_process: int=1, process_id: int=0, seed: Optional[int]=None, experiment_id: Optional[str]=None, max_concurrent_cache_files: int=10000, timeout: Union[int, float]=100, **kwargs) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
+ multi_hosts: bool = field(init=False)
|
dalle_mini.data/Dataset.dataloader
|
Modified
|
borisdayma~dalle-mini
|
ed93c8ab114018f3fb7b870b60e50ca4cccfcd1a
|
feat: split shards by host
|
<27>:<add> def _dataloader_datasets_streaming(
<add> dataset: Dataset, batch_size: int, epoch: int
<add> ):
<add> # epoch is only used for multi-host
<del> def _dataloader_datasets_streaming(dataset: Dataset, batch_size: int):
<30>:<add> first_loop = True
<add> while self.multi_hosts or first_loop:
<add> # in multi-host, we run forever (no epoch) as hosts need to stop
<add> # at the same time and we don't know how much data is on each host
<add> if not first_loop:
<add> # multi-host setting, we reshuffle shards
<add> epoch += 1
<add> dataset.set_epoch(epoch)
<add> for item in dataset:
<del> for item in dataset:
<31>:<add> for k, v in item.items():
<del> for k, v in item.items():
<32>:<add> batch[k].append(v)
<del> batch[k].append(v)
<33>:<add> if len(batch[keys[0]]) == batch_size:
<del> if len(batch[keys[0]]) == batch_size:
<34>:<add> batch = {k: jnp.array(v) for k
|
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(self, split, batch_size, epoch=None):
<0> def _dataloader_datasets_non_streaming(
<1> dataset: Dataset,
<2> batch_size: int,
<3> rng: jax.random.PRNGKey = None,
<4> ):
<5> """
<6> Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
<7> Shuffle batches if `shuffle` is `True`.
<8> """
<9> steps_per_epoch = len(dataset) // batch_size
<10>
<11> if rng is not None:
<12> batch_idx = jax.random.permutation(rng, len(dataset))
<13> else:
<14> batch_idx = jnp.arange(len(dataset))
<15>
<16> batch_idx = batch_idx[
<17> : steps_per_epoch * batch_size
<18> ] # Skip incomplete batch.
<19> batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
<20>
<21> for idx in batch_idx:
<22> batch = dataset[idx]
<23> batch = {k: jnp.array(v) for k, v in batch.items()}
<24> batch = shard(batch)
<25> yield batch
<26>
<27> def _dataloader_datasets_streaming(dataset: Dataset, batch_size: int):
<28> keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"]
<29> batch = {k: [] for k in keys}
<30> for item in dataset:
<31> for k, v in item.items():
<32> batch[k].append(v)
<33> if len(batch[keys[0]]) == batch_size:
<34> batch = {k: jnp.array(v) for k, v in batch.items()}
<35> batch = shard(batch)
<36> yield batch
<37> batch = {k: [] for k in keys}
<38>
<39> if split == "train":
<40> ds = self.</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(self, split, batch_size, epoch=None):
# offset: 1
elif split == "eval":
ds = self.eval_dataset
else:
raise ValueError(f'split must be "train" or "eval", got {split}')
if self.streaming:
if split == "train":
ds.set_epoch(epoch)
return _dataloader_datasets_streaming(ds, batch_size)
else:
if split == "train":
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
return _dataloader_datasets_non_streaming(ds, batch_size, input_rng)
===========unchanged ref 0===========
at: dalle_mini.data
Dataset(dataset_repo_or_path: str, train_file: str=None, validation_file: str=None, streaming: bool=True, use_auth_token: bool=False, text_column: str="caption", encoding_column: str="encoding", max_train_samples: int=None, max_eval_samples: int=None, preprocessing_num_workers: int=None, overwrite_cache: bool=False, do_train: bool=False, do_eval: bool=True, seed_dataset: int=None, train_dataset: Dataset=field(init=False), eval_dataset: Dataset=field(init=False), rng_dataset: jnp.ndarray=field(init=False))
at: dalle_mini.data.Dataset
preprocessing_num_workers: int = None
overwrite_cache: bool = False
at: dalle_mini.data.Dataset.__post_init__
self.multi_hosts = jax.process_count > 1
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
+ multi_hosts: bool = field(init=False)
===========changed ref 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
+ self.multi_hosts = jax.process_count > 1
# define data_files
if self.train_file is not None or self.validation_file is not None:
# accept braceexpand notation
for k in ["train_file", "validation_file"]:
f = getattr(self, k)
if isinstance(f, str):
setattr(self, k, list(braceexpand(f)))
+ # for list of files, split training data shards by host
+ if isinstance(self.train_file, list) and self.multi_hosts:
+ self.train_file = self.train_file[
+ jax.process_index() :: jax.process_count()
+ ]
data_files = {
"train": self.train_file,
"validation": self.validation_file,
}
else:
data_files = None
# load dataset
dataset = load_dataset(
self.dataset_repo_or_path,
data_files=data_files,
streaming=self.streaming,
use_auth_token=self.use_auth_token,
)
if self.do_train:
if "train" not in dataset:
raise ValueError("Training requires a training dataset")
self.train_dataset = dataset["train"]
if self.max_train_samples is not None:
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
if self.do_eval:
if "validation" not in dataset:
raise ValueError("Evaluating requires a validation dataset")
self.eval_dataset = dataset["validation"]
if self.max_eval_samples is not None:
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self</s>
===========changed ref 2===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
<s> self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
|
dalle_mini.data/Dataset.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
c6ebb14483bbead46375186afc67af17dddf97b9
|
fix(data): type
|
<0>:<add> self.multi_hosts = jax.process_count() > 1
<del> self.multi_hosts = jax.process_count > 1
|
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
<0> self.multi_hosts = jax.process_count > 1
<1> # define data_files
<2> if self.train_file is not None or self.validation_file is not None:
<3> # accept braceexpand notation
<4> for k in ["train_file", "validation_file"]:
<5> f = getattr(self, k)
<6> if isinstance(f, str):
<7> setattr(self, k, list(braceexpand(f)))
<8> # for list of files, split training data shards by host
<9> if isinstance(self.train_file, list) and self.multi_hosts:
<10> self.train_file = self.train_file[
<11> jax.process_index() :: jax.process_count()
<12> ]
<13> data_files = {
<14> "train": self.train_file,
<15> "validation": self.validation_file,
<16> }
<17> else:
<18> data_files = None
<19>
<20> # load dataset
<21> dataset = load_dataset(
<22> self.dataset_repo_or_path,
<23> data_files=data_files,
<24> streaming=self.streaming,
<25> use_auth_token=self.use_auth_token,
<26> )
<27> if self.do_train:
<28> if "train" not in dataset:
<29> raise ValueError("Training requires a training dataset")
<30> self.train_dataset = dataset["train"]
<31> if self.max_train_samples is not None:
<32> self.train_dataset = (
<33> self.train_dataset.take(self.max_train_samples)
<34> if self.streaming
<35> else self.train_dataset.select(range(self.max_train_samples))
<36> )
<37> if self.do_eval:
<38> if "validation" not in dataset:
<39> raise ValueError("Evaluating requires a validation dataset")
<40> self.eval_dataset = dataset["validation"]
<41> if self.max_eval_</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
===========unchanged ref 0===========
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
at: datasets.arrow_dataset.Dataset
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 1===========
at: datasets.load
load_dataset(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, split: Optional[Union[str, Split]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[DownloadMode]=None, ignore_verifications: bool=False, keep_in_memory: Optional[bool]=None, save_infos: bool=False, revision: Optional[Union[str, Version]]=None, use_auth_token: Optional[Union[bool, str]]=None, task: Optional[Union[str, TaskTemplate]]=None, streaming: bool=False, num_proc: Optional[int]=None, *, num_process: int=1, process_id: int=0, seed: Optional[int]=None, experiment_id: Optional[str]=None, max_concurrent_cache_files: int=10000, timeout: Union[int, float]=100, base_path: Optional[str]=None, info: Optional[DatasetInfo]=None, repo_id: Optional[str]=None, **kwargs) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]
|
dalle_mini.data/Dataset.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
901ff720f5d802223efb7ea0358981b3cfecc90e
|
feat: shard by host is optional
|
<9>:<add> if (
<add> isinstance(self.train_file, list)
<del> if isinstance(self.train_file, list) and self.multi_hosts:
<10>:<add> and self.multi_hosts
<add> and self.shard_by_host
<add> ):
|
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
<0> self.multi_hosts = jax.process_count() > 1
<1> # define data_files
<2> if self.train_file is not None or self.validation_file is not None:
<3> # accept braceexpand notation
<4> for k in ["train_file", "validation_file"]:
<5> f = getattr(self, k)
<6> if isinstance(f, str):
<7> setattr(self, k, list(braceexpand(f)))
<8> # for list of files, split training data shards by host
<9> if isinstance(self.train_file, list) and self.multi_hosts:
<10> self.train_file = self.train_file[
<11> jax.process_index() :: jax.process_count()
<12> ]
<13> data_files = {
<14> "train": self.train_file,
<15> "validation": self.validation_file,
<16> }
<17> else:
<18> data_files = None
<19>
<20> # load dataset
<21> dataset = load_dataset(
<22> self.dataset_repo_or_path,
<23> data_files=data_files,
<24> streaming=self.streaming,
<25> use_auth_token=self.use_auth_token,
<26> )
<27> if self.do_train:
<28> if "train" not in dataset:
<29> raise ValueError("Training requires a training dataset")
<30> self.train_dataset = dataset["train"]
<31> if self.max_train_samples is not None:
<32> self.train_dataset = (
<33> self.train_dataset.take(self.max_train_samples)
<34> if self.streaming
<35> else self.train_dataset.select(range(self.max_train_samples))
<36> )
<37> if self.do_eval:
<38> if "validation" not in dataset:
<39> raise ValueError("Evaluating requires a validation dataset")
<40> self.eval_dataset = dataset["validation"]
<41> if self.max_eval</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
===========unchanged ref 0===========
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
at: datasets.arrow_dataset.Dataset
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 1===========
at: datasets.load
load_dataset(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, split: Optional[Union[str, Split]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[DownloadMode]=None, ignore_verifications: bool=False, keep_in_memory: Optional[bool]=None, save_infos: bool=False, revision: Optional[Union[str, Version]]=None, use_auth_token: Optional[Union[bool, str]]=None, task: Optional[Union[str, TaskTemplate]]=None, streaming: bool=False, num_proc: Optional[int]=None, *, num_process: int=1, process_id: int=0, seed: Optional[int]=None, experiment_id: Optional[str]=None, max_concurrent_cache_files: int=10000, timeout: Union[int, float]=100, base_path: Optional[str]=None, info: Optional[DatasetInfo]=None, repo_id: Optional[str]=None, **kwargs) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
+ shard_by_host: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
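A minimal sketch of the shard-by-host behaviour this flag controls, using made-up shard names: each host keeps every `process_count()`-th training file, starting at its own `process_index()`.

import jax

train_file = [f"train-{i:05d}.jsonl" for i in range(16)]  # hypothetical shard names
train_file = train_file[jax.process_index() :: jax.process_count()]
# with 4 hosts: host 0 keeps shards 0, 4, 8, 12; host 1 keeps 1, 5, 9, 13; etc.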
|
tools.train.train/MetricsLogger.get_all_train_metrics
|
Modified
|
borisdayma~dalle-mini
|
71435938622a32e5b67a5ae170b38e83db6b7c95
|
fix: weight decay Adam + speed logging
|
<10>:<add> if new_step > self.step:
<add> time_per_step = (new_time - self.time) / (new_step - self.step)
<del> time_per_step = (new_time - self.time) / (new_step - self.step)
<11>:<add> self.step = new_step
<del> self.step = new_step
<12>:<add> self.time = new_time
<del> self.time = new_time
<13>:<add> state_dict["time_per_step"] = time_per_step
<del> return {**metrics, **state_dict, "time_per_step": time_per_step}
<14>:<add> return {**metrics, **state_dict}
|
# module: tools.train.train
class MetricsLogger:
def get_all_train_metrics(self, train_metrics, state):
<0> """Make a dict of training metrics to be logged"""
<1> metrics = unreplicate(train_metrics)
<2> # get state parameters
<3> state_dict = {
<4> k.split("_")[-1]: unreplicate(getattr(state, k))
<5> for k in ["epoch", "train_time", "train_samples"]
<6> }
<7> # timing metrics
<8> new_step = int(unreplicate(state.step))
<9> new_time = time.perf_counter()
<10> time_per_step = (new_time - self.time) / (new_step - self.step)
<11> self.step = new_step
<12> self.time = new_time
<13> return {**metrics, **state_dict, "time_per_step": time_per_step}
<14>
|
===========unchanged ref 0===========
at: time
perf_counter() -> float
at: tools.train.train.MetricsLogger.__init__
self.step = state.step
self.time = time.perf_counter()
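The added `new_step > self.step` guard avoids a division by zero when the logger runs again before the step counter has advanced. A stripped-down sketch of the same bookkeeping:

import time

class StepTimer:
    """Minimal sketch of the time-per-step measurement above."""

    def __init__(self, step=0):
        self.step = step
        self.time = time.perf_counter()

    def update(self, new_step):
        now = time.perf_counter()
        if new_step > self.step:  # only report when the step counter moved
            time_per_step = (now - self.time) / (new_step - self.step)
            self.step, self.time = new_step, now
            return time_per_step
        return None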
|
tools.train.distributed_shampoo/power_iteration
|
Modified
|
borisdayma~dalle-mini
|
b90198c23bd1e1a94314c888686305f880c5d715
|
feat: update distributed_shampoo
|
# module: tools.train.distributed_shampoo
def power_iteration(
matrix,
num_iters=100,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST):
<0> r"""Power iteration algorithm.
<1>
<2> The power iteration algorithm takes a symmetric PSD matrix `A`, and produces
<3> a scalar `\lambda` , which is the greatest (in absolute value) eigenvalue
<4> of `A`, and a vector v, which is the corresponding eigenvector of `A`.
<5>
<6> References:
<7> [Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration)
<8>
<9> Args:
<10> matrix: the symmetric PSD matrix.
<11> num_iters: Number of iterations.
<12> error_tolerance: Iterative exit condition.
<13> precision: precision XLA related flag, the available options are:
<14> a) lax.Precision.DEFAULT (better step time, but not precise)
<15> b) lax.Precision.HIGH (increased precision, slower)
<16> c) lax.Precision.HIGHEST (best possible precision, slowest)
<17>
<18> Returns:
<19> eigen vector, eigen value
<20> """
<21> matrix_size = matrix.shape[-1]
<22> def _iter_condition(state):
<23> i, unused_v, unused_s, unused_s_v, run_step = state
<24> return jnp.logical_and(i < num_iters, run_step)
<25>
<26> def _iter_body(state):
<27> """One step of power iteration."""
<28> i, new_v, s, s_v, unused_run_step = state
<29> new_v = new_v / jnp.linalg.norm(new_v)
<30>
<31> s_v = jnp.einsum('ij,j->i', matrix, new_v, precision=precision)
<32> s_new = jnp.einsum('i,i->', new_v, s_v, precision=precision)
<33> return (i + 1, s_v, s_</s>
|
===========below chunk 0===========
# module: tools.train.distributed_shampoo
def power_iteration(
matrix,
num_iters=100,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST):
# offset: 1
jnp.greater(jnp.abs(s_new - s), error_tolerance))
# Figure out how to use step as seed for random.
v_0 = np.random.uniform(-1.0, 1.0, matrix_size).astype(matrix.dtype)
init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True])
_, v_out, s_out, _, _ = lax.while_loop(
_iter_condition, _iter_body, init_state)
v_out = v_out / jnp.linalg.norm(v_out)
return v_out, s_out
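For intuition, the same loop as a plain NumPy sketch (no convergence test, no `lax.while_loop`); the eigenvalue estimate is the Rayleigh quotient of the current iterate:

import numpy as np

rng = np.random.RandomState(0)
a = rng.uniform(-1.0, 1.0, (4, 4))
matrix = a @ a.T                      # small symmetric PSD test matrix

v = rng.uniform(-1.0, 1.0, 4)
for _ in range(100):
    v = v / np.linalg.norm(v)
    s_v = matrix @ v                  # A v
    s = v @ s_v                       # eigenvalue estimate (Rayleigh quotient)
    v = s_v
v = v / np.linalg.norm(v)
print(s, np.max(np.abs(matrix @ v - s * v)))   # top eigenvalue and residual ~ 0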
===========unchanged ref 0===========
at: tools.train.distributed_shampoo
QuantizedValue()
at: tools.train.distributed_shampoo.QuantizedValue
quantized: chex.Array
diagonal: chex.Array # Diagonal (if extract_diagonal is set)
bucket_size: chex.Array
quantized_dtype: jnp.dtype = struct.field(
pytree_node=False) # Dtype for the quantized value.
extract_diagonal: bool = struct.field(
pytree_node=False) # In case its centered.
shape: Any = struct.field(pytree_node=False) # Shape of the tensor.
at: tools.train.distributed_shampoo.QuantizedValue.quantize
num_buckets = jnp.array(32767.0, dtype=float_dtype)
num_buckets = jnp.array(127.0, dtype=float_dtype)
diagonal_fvalue = []
at: typing
List = _alias(list, 1, inst=False, name='List')
NamedTuple(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
===========changed ref 0===========
# module: tools.train.distributed_shampoo
+ # pylint:disable=no-value-for-parameter
+ @struct.dataclass
+ class QuantizedValue:
+ """State associated with quantized value."""
+ quantized: chex.Array
+ diagonal: chex.Array # Diagonal (if extract_diagonal is set)
+ bucket_size: chex.Array
+ quantized_dtype: jnp.dtype = struct.field(
+ pytree_node=False) # Dtype for the quantized value.
+ extract_diagonal: bool = struct.field(
+ pytree_node=False) # In case its centered.
+ shape: Any = struct.field(pytree_node=False) # Shape of the tensor.
+
===========changed ref 1===========
# module: tools.train.distributed_shampoo
+ # pylint:disable=no-value-for-parameter
+ @struct.dataclass
+ class QuantizedValue:
+ @classmethod
+ def from_float_value(cls, fvalue, quantized_dtype, extract_diagonal=False):
+ if isinstance(fvalue, list) and not fvalue:
+ return QuantizedValue([], [], [], quantized_dtype, extract_diagonal, [])
+ quantized, diagonal_fvalue, bucket_size = QuantizedValue.quantize(
+ fvalue, quantized_dtype, extract_diagonal)
+ return QuantizedValue(quantized, diagonal_fvalue, bucket_size,
+ quantized_dtype, extract_diagonal,
+ list(quantized.shape))
+
===========changed ref 2===========
# module: tools.train.distributed_shampoo
# These are per-parameter local states; All statistics here mirror the parameter
# Thus the sharding is copied over from the param specification.
@struct.dataclass
class LocalShardedParameterStats:
"""State associated to each parameter of the model being trained."""
+ diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner
- diagonal_statistics: chex.Array # Accumulator for diagonal preconditioner
+ diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner
- diagonal_momentum: chex.Array # Momentum for the diagonal preconditioner
+ momentum: QuantizedValue # Momentum for the shampoo preconditioner
- momentum: chex.Array # Momentum for the shampoo preconditioner
index_start: np.int32 = struct.field(
pytree_node=False) # Index into global statistics array
sizes: Any = struct.field(pytree_node=False) # Sizes of the statistics.
===========changed ref 3===========
# module: tools.train.distributed_shampoo
- # pylint:disable=no-value-for-parameter
-
-
# Per parameter optimizer state used in data-parallel training.
class ParameterStats(NamedTuple):
"""State associated to each parameter of the model being trained."""
+ diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner
- diagonal_statistics: chex.Array # Accumulator for diagonal preconditioner
+ statistics: List[Any] # Statistics (QuantizedValue, chex.Array)
+ preconditioners: List[Any] # Preconditioners (QuantizedValue, chex.Array)
- statistics: chex.Array # Statistics
- preconditioners: chex.Array # Preconditioners
+ diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner
- diagonal_momentum: chex.Array # Momentum for the diagonal preconditioner
+ momentum: QuantizedValue # Momentum for the shampoo preconditioner
- momentum: chex.Array # Momentum for the shampoo preconditioner
===========changed ref 4===========
# module: tools.train.distributed_shampoo
+ # pylint:disable=no-value-for-parameter
+ @struct.dataclass
+ class QuantizedValue:
+ # Quantization is from Lingvo JAX optimizers.
+ # We extend it for int16 quantization of PSD matrices.
+ @classmethod
+ def quantize(cls, fvalue, quantized_dtype, extract_diagonal=False):
+ """Returns quantized value and the bucket."""
+ if quantized_dtype == jnp.float32:
+ return fvalue, [], []
+ elif quantized_dtype == jnp.bfloat16:
+ return fvalue.astype(jnp.bfloat16), [], []
+
+ float_dtype = fvalue.dtype
+ if quantized_dtype == jnp.int8:
+ # value -128 is not used.
+ num_buckets = jnp.array(127.0, dtype=float_dtype)
+ elif quantized_dtype == jnp.int16:
+ # value -32768 is not used.
+ num_buckets = jnp.array(32767.0, dtype=float_dtype)
+ else:
+ raise ValueError(f'Quantized dtype {quantized_dtype} not supported.')
+ # max value is mapped to num_buckets
+
+ if extract_diagonal and fvalue.ndim != 2:
+ raise ValueError(
+ f'Input array {fvalue} must be 2D to work with extract_diagonal.')
+
+ diagonal_fvalue = []
+ if extract_diagonal:
+ diagonal_fvalue = jnp.diag(fvalue)
+ # Remove the diagonal entries.
+ fvalue = fvalue - jnp.diag(diagonal_fvalue)
+
+ # TODO(rohananil): Extend this by making use of information about the blocks
+ # SM3 style which will be useful for diagonal statistics
+ # We first decide the scale.
+ if fvalue.ndim < 1:
+ raise ValueError(
+ f'Input array {f</s>
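A hedged NumPy sketch of the bucket scheme described above (per-row scaling for illustration only; the optimizer works on JAX arrays and additionally handles the extracted diagonal): the largest absolute value in a row maps to `num_buckets`, and everything else is rounded relative to that bucket size.

import numpy as np

def quantize_int8(fvalue):
    num_buckets = 127.0                    # value -128 is left unused, as above
    bucket_size = np.max(np.abs(fvalue), axis=-1, keepdims=True) / num_buckets
    return np.round(fvalue / bucket_size).astype(np.int8), bucket_size

def dequantize(quantized, bucket_size):
    return quantized.astype(np.float32) * bucket_size

x = np.random.RandomState(0).normal(size=(4, 4)).astype(np.float32)
q, b = quantize_int8(x)
print(np.max(np.abs(dequantize(q, b) - x)))    # small reconstruction error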
|
|
dalle_mini.model.modeling/DalleBart.from_pretrained
|
Modified
|
borisdayma~dalle-mini
|
55a631d3e4bcadc2f30018266016985a13e391b4
|
Store resolved path after loading model.
|
<10>:<del>
<11>:<del> return super(DalleBart, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
|
# module: dalle_mini.model.modeling
class DalleBart(FlaxBartPreTrainedModel, FlaxBartForConditionalGeneration):
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
<0> """
<1> Initializes from a wandb artifact, or delegates loading to the superclass.
<2> """
<3> if ':' in pretrained_model_name_or_path:
<4> # wandb artifact
<5> artifact = wandb.Api().artifact(pretrained_model_name_or_path)
<6>
<7> # we download everything, including opt_state, so we can resume training if needed
<8> # see also: #120
<9> pretrained_model_name_or_path = artifact.download()
<10>
<11> return super(DalleBart, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
<12>
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling
DalleBart(config: DalleBartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, abstract_init: bool=False, **kwargs)
at: transformers.modeling_flax_utils.FlaxPreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_missing_keys = set()
from_pretrained(pretrained_model_name_or_path: Union[str, os.PathLike], dtype: jnp.dtype=jnp.float32, module: nn.Module, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, /, *, config: Optional[Union[PretrainedConfig, str, os.PathLike]]=None, cache_dir: Optional[Union[str, os.PathLike]]=None, ignore_mismatched_sizes: bool=False, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str="main", config_file_name: Optional[Union[str, os.PathLike]]=None, **kwargs)
at: wandb
Api = PublicApi
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
===========unchanged ref 1===========
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
download(root=None, recursive=False)
|
dalle_mini.model.modeling/DalleBart.from_pretrained
|
Modified
|
borisdayma~dalle-mini
|
08dd098784c1d209106bbb836689fd4b67ae78e2
|
Never consider local dirs as remote wandb references.
|
<3>:<add> if ':' in pretrained_model_name_or_path and not os.path.isdir(pretrained_model_name_or_path):
<del> if ':' in pretrained_model_name_or_path:
|
# module: dalle_mini.model.modeling
class DalleBart(FlaxBartPreTrainedModel, FlaxBartForConditionalGeneration):
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
<0> """
<1> Initializes from a wandb artifact, or delegates loading to the superclass.
<2> """
<3> if ':' in pretrained_model_name_or_path:
<4> # wandb artifact
<5> artifact = wandb.Api().artifact(pretrained_model_name_or_path)
<6>
<7> # we download everything, including opt_state, so we can resume training if needed
<8> # see also: #120
<9> pretrained_model_name_or_path = artifact.download()
<10>
<11> model = super(DalleBart, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
<12> model.config.resolved_name_or_path = pretrained_model_name_or_path
<13> return model
<14>
|
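Illustrative usage only (both paths below are made up): a name containing ':' is treated as a wandb artifact reference, unless it is an existing local directory, such as one produced by `artifact.download()`, in which case it is loaded directly.

from dalle_mini.model.modeling import DalleBart

model = DalleBart.from_pretrained("user/dalle-mini/model-run:v12")  # wandb artifact reference (hypothetical)
model = DalleBart.from_pretrained("./artifacts/model-run:v12")      # local dir containing ':', loaded as-is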
===========unchanged ref 0===========
at: dalle_mini.model.modeling
DalleBart(config: DalleBartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, abstract_init: bool=False, **kwargs)
at: os.path
isdir(s: AnyPath) -> bool
at: transformers.modeling_flax_utils.FlaxPreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_missing_keys = set()
from_pretrained(pretrained_model_name_or_path: Union[str, os.PathLike], dtype: jnp.dtype=jnp.float32, module: nn.Module, input_shape: Tuple=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, /, *, config: Optional[Union[PretrainedConfig, str, os.PathLike]]=None, cache_dir: Optional[Union[str, os.PathLike]]=None, ignore_mismatched_sizes: bool=False, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str="main", config_file_name: Optional[Union[str, os.PathLike]]=None, **kwargs)
at: wandb
Api = PublicApi
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
===========unchanged ref 1===========
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
download(root=None, recursive=False)
|
dalle_mini.data/Dataset.dataloader
|
Modified
|
borisdayma~dalle-mini
|
f69b21b3d8906f5f5bdfcc58fe9d26f53cdcf560
|
Load from wandb artifact (#121)
|
<0>:<add> num_devices = jax.local_device_count()
<add>
<2>:<add> per_device_batch_size: int,
<del> batch_size: int,
<3>:<add> gradient_accumulation_steps: int,
<7>:<add> Shuffle batches if rng is set.
<del> Shuffle batches if `shuffle` is `True`.
<9>:<add> batch_size = (
<add> per_device_batch_size * num_devices * gradient_accumulation_steps
<add> )
<24>:<add> if gradient_accumulation_steps is not None:
<add> batch = jax.tree_map(
<add> lambda x: x.reshape((-1, per_device_batch_size) + x.shape[1:]),
<add> batch,
<add> )
<28>:<add> dataset: Dataset,
<add> split: str,
<add> per_device_batch_size: int,
<add> gradient_accumulation_steps: int,
<add> epoch: int,
<del> dataset: Dataset, batch_size: int, epoch: int
<30>:<del> # epoch is only use for multi-host
<33>:<add> first_loop = True # stop after one loop in some cases
<del> first_loop = True
|
# module: dalle_mini.data
@dataclass
class Dataset:
+ def dataloader(
+ self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
+ ):
- def dataloader(self, split, batch_size, epoch=None):
<0> def _dataloader_datasets_non_streaming(
<1> dataset: Dataset,
<2> batch_size: int,
<3> rng: jax.random.PRNGKey = None,
<4> ):
<5> """
<6> Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
<7> Shuffle batches if `shuffle` is `True`.
<8> """
<9> steps_per_epoch = len(dataset) // batch_size
<10>
<11> if rng is not None:
<12> batch_idx = jax.random.permutation(rng, len(dataset))
<13> else:
<14> batch_idx = jnp.arange(len(dataset))
<15>
<16> batch_idx = batch_idx[
<17> : steps_per_epoch * batch_size
<18> ] # Skip incomplete batch.
<19> batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
<20>
<21> for idx in batch_idx:
<22> batch = dataset[idx]
<23> batch = {k: jnp.array(v) for k, v in batch.items()}
<24> batch = shard(batch)
<25> yield batch
<26>
<27> def _dataloader_datasets_streaming(
<28> dataset: Dataset, batch_size: int, epoch: int
<29> ):
<30> # epoch is only use for multi-host
<31> keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"]
<32> batch = {k: [] for k in keys}
<33> first_loop = True
<34> while self.multi_hosts or first_loop:
<35> # in multi-host, we run forever (no epoch) as hosts need to stop
<36> # at the same time and we don</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
+ def dataloader(
+ self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
+ ):
- def dataloader(self, split, batch_size, epoch=None):
# offset: 1
if not first_loop:
# multi-host setting, we reshuffle shards
epoch += 1
dataset.set_epoch(epoch)
for item in dataset:
for k, v in item.items():
batch[k].append(v)
if len(batch[keys[0]]) == batch_size:
batch = {k: jnp.array(v) for k, v in batch.items()}
batch = shard(batch)
yield batch
batch = {k: [] for k in keys}
first_loop = False
if split == "train":
ds = self.train_dataset
elif split == "eval":
ds = self.eval_dataset
else:
raise ValueError(f'split must be "train" or "eval", got {split}')
if self.streaming:
if split == "train":
ds.set_epoch(epoch)
return _dataloader_datasets_streaming(ds, batch_size, epoch)
else:
if split == "train":
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
return _dataloader_datasets_non_streaming(ds, batch_size, input_rng)
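The extra reshape used for gradient accumulation is easier to follow with concrete, hypothetical shapes (8 local devices, per-device batch of 5, 2 accumulation steps):

import jax.numpy as jnp
from flax.training.common_utils import shard

per_device_batch_size, num_devices, grad_accum = 5, 8, 2
batch = jnp.zeros((per_device_batch_size * num_devices * grad_accum, 3, 3))   # (80, 3, 3)

batch = batch.reshape((-1, per_device_batch_size) + batch.shape[1:])          # (16, 5, 3, 3)
batch = shard(batch)                                                          # (8, 2, 5, 3, 3)
# each device then iterates over its leading axis of 2 accumulation micro-batches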
===========unchanged ref 0===========
at: dalle_mini.data
Dataset(dataset_repo_or_path: str, train_file: str=None, validation_file: str=None, streaming: bool=True, use_auth_token: bool=False, text_column: str="caption", encoding_column: str="encoding", max_train_samples: int=None, max_eval_samples: int=None, preprocessing_num_workers: int=None, overwrite_cache: bool=False, do_train: bool=False, do_eval: bool=True, seed_dataset: int=None, shard_by_host: bool=False, train_dataset: Dataset=field(init=False), eval_dataset: Dataset=field(init=False), rng_dataset: jnp.ndarray=field(init=False), multi_hosts: bool=field(init=False))
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.__post_init__
self.multi_hosts = jax.process_count() > 1
===========changed ref 0===========
# module: tools.train.train
@dataclass
class TrainingArguments:
+ def __post_init__(self):
+ assert self.optim in [
+ "distributed_shampoo",
+ "adam",
+ "adafactor",
+ ], f"Selected optimizer not supported: {self.optim}"
+ if self.per_device_eval_batch_size is None:
+ self.per_device_eval_batch_size = self.per_device_train_batch_size
+ if self.weight_decay is None:
+ if self.optim in ["distributed_shampoo", "adam"]:
+ self.weight_decay = 0.0
+ if (
+ os.path.exists(self.output_dir)
+ and os.listdir(self.output_dir)
+ and self.do_train
+ and not self.overwrite_output_dir
+ ):
+ raise ValueError(
+ f"Output directory ({self.output_dir}) already exists and is not empty."
+ "Use --overwrite_output_dir to overcome."
+ )
+
===========changed ref 1===========
# module: tools.train.train
- def create_learning_rate_fn(
- num_warmup_steps: int,
- learning_rate: float,
- use_decay: bool,
- num_train_steps: int = None, # used only with `use_decay`, typically train_size // batch_size * num_epochs
- ) -> Callable[[int], jnp.array]:
- """Returns a linear warmup, linear_decay learning rate function."""
- if use_decay:
- assert (
- num_train_steps is not None
- ), "Learning rate with decay requires number of training steps"
- warmup_fn = optax.linear_schedule(
- init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
- )
- if not use_decay:
- return warmup_fn
- decay_fn = optax.linear_schedule(
- init_value=learning_rate,
- end_value=0,
- transition_steps=num_train_steps - num_warmup_steps,
- )
- schedule_fn = optax.join_schedules(
- schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]
- )
- return schedule_fn
-
===========changed ref 2===========
# module: tools.train.train
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
+ "help": "Pretrained config name or path if not the same as model_name_or_path"
- "help": "Pretrained config name or path if not the same as model_name"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
+ "help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
- "help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
|
tools.train.train/TrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
0081723a66509d60d116a8eda9c1ae27dada8e3a
|
feat(train): start pjit support
|
<7>:<del> if self.weight_decay is None:
<8>:<del> if self.optim in ["distributed_shampoo", "adam"]:
<9>:<del> self.weight_decay = 0.0
<20>:<add> assert (
<add> jax.device_count() % self.mp_devices == 0
<add> ), f"Number of available devices ({jax.device_count()} must be divisible by number of devices used for model parallelism ({self.mp_devices})."
<add> self.dp_devices = jax.device_count() // self.mp_devices
|
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
<0> assert self.optim in [
<1> "distributed_shampoo",
<2> "adam",
<3> "adafactor",
<4> ], f"Selected optimizer not supported: {self.optim}"
<5> if self.per_device_eval_batch_size is None:
<6> self.per_device_eval_batch_size = self.per_device_train_batch_size
<7> if self.weight_decay is None:
<8> if self.optim in ["distributed_shampoo", "adam"]:
<9> self.weight_decay = 0.0
<10> if (
<11> os.path.exists(self.output_dir)
<12> and os.listdir(self.output_dir)
<13> and self.do_train
<14> and not self.overwrite_output_dir
<15> ):
<16> raise ValueError(
<17> f"Output directory ({self.output_dir}) already exists and is not empty."
<18> "Use --overwrite_output_dir to overcome."
<19> )
<20>
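A minimal sketch of what the new `dp_devices` / `mp_devices` split is for: arranging every device into a 2-D grid from which a pjit mesh can be built (the mesh constructor itself is left as a comment because its import path depends on the JAX version):

import numpy as np
import jax

mp_devices = 2                                    # assumption: model parallelism over 2 devices
assert jax.device_count() % mp_devices == 0
dp_devices = jax.device_count() // mp_devices
devices = np.asarray(jax.devices()).reshape(dp_devices, mp_devices)
# e.g. Mesh(devices, ("dp", "mp")); pjit then shards params over "mp" and batches over "dp"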
|
===========unchanged ref 0===========
at: dataclasses
field(*, default_factory: Callable[[], _T], init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
field(*, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> Any
field(*, default: _T, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
at: tools.train.train.TrainingArguments
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU/CPU for training."}
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per GPU/TPU/CPU for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
===========unchanged ref 1===========
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
===========unchanged ref 2===========
warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
lr_staircase: bool = field(
default=False,
metadata={
"help": "Whether to use staircase or continuous learning rate when using exponential decay."
},
)
logging_steps: int = field(
default=40, metadata={"help": "Log every X updates steps."}
)
eval_steps: int = field(
default=400, metadata={"help": "Run an evaluation every X steps."}
)
save_steps: int = field(
default=4000, metadata={"help": "Save checkpoint every X updates steps."}
)
log_model: bool = field(
default=False,
metadata={"help": "Log model to wandb at `save_steps` frequency."},
)
seed_model: int = field(
default=42,
metadata={
"help": "Random seed for the model that will be set at the beginning of training."
},
)
resume_from_checkpoint: Optional[str] = field(
default=None,
metadata={"help": "Reference to a wandb artifact for resuming training."},
)
===========unchanged ref 3===========
wandb_entity: Optional[str] = field(
default=None,
metadata={"help": "The wandb entity to use (for teams)."},
)
wandb_project: str = field(
default="dalle-mini",
metadata={"help": "The name of the wandb project."},
)
wandb_job_type: str = field(
default="Seq2Seq",
metadata={"help": "The name of the wandb job type."},
)
|
tools.train.train/MetricsLogger.get_all_train_metrics
|
Modified
|
borisdayma~dalle-mini
|
49597a2091cfa137f599d76353f3a8b77a819a11
|
feat(train): progress on pjit
|
<1>:<add> metrics = train_metrics
<del> metrics = unreplicate(train_metrics)
<4>:<add> k.split("_")[-1]: getattr(state, k)
<del> k.split("_")[-1]: unreplicate(getattr(state, k))
<8>:<add> new_step = int(state.step)
<del> new_step = int(unreplicate(state.step))
|
# module: tools.train.train
class MetricsLogger:
def get_all_train_metrics(self, train_metrics, state):
<0> """Make a dict of training metrics to be logged"""
<1> metrics = unreplicate(train_metrics)
<2> # get state parameters
<3> state_dict = {
<4> k.split("_")[-1]: unreplicate(getattr(state, k))
<5> for k in ["epoch", "train_time", "train_samples"]
<6> }
<7> # timing metrics
<8> new_step = int(unreplicate(state.step))
<9> new_time = time.perf_counter()
<10> if new_step > self.step:
<11> time_per_step = (new_time - self.time) / (new_step - self.step)
<12> self.step = new_step
<13> self.time = new_time
<14> state_dict["time_per_step"] = time_per_step
<15> return {**metrics, **state_dict}
<16>
|
===========unchanged ref 0===========
at: time
perf_counter() -> float
at: tools.train.train.MetricsLogger.__init__
self.step = state.step
self.time = time.perf_counter()
|
dalle_mini.data/Dataset.dataloader
|
Modified
|
borisdayma~dalle-mini
|
49597a2091cfa137f599d76353f3a8b77a819a11
|
feat(train): progress on pjit
|
<35>:<del> batch = shard(batch)
|
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(
self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
):
<0> num_devices = jax.local_device_count()
<1>
<2> def _dataloader_datasets_non_streaming(
<3> dataset: Dataset,
<4> per_device_batch_size: int,
<5> gradient_accumulation_steps: int,
<6> rng: jax.random.PRNGKey = None,
<7> ):
<8> """
<9> Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
<10> Shuffle batches if rng is set.
<11> """
<12> batch_size = (
<13> per_device_batch_size * num_devices * gradient_accumulation_steps
<14> )
<15> steps_per_epoch = len(dataset) // batch_size
<16>
<17> if rng is not None:
<18> batch_idx = jax.random.permutation(rng, len(dataset))
<19> else:
<20> batch_idx = jnp.arange(len(dataset))
<21>
<22> batch_idx = batch_idx[
<23> : steps_per_epoch * batch_size
<24> ] # Skip incomplete batch.
<25> batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
<26>
<27> for idx in batch_idx:
<28> batch = dataset[idx]
<29> batch = {k: jnp.array(v) for k, v in batch.items()}
<30> if gradient_accumulation_steps is not None:
<31> batch = jax.tree_map(
<32> lambda x: x.reshape((-1, per_device_batch_size) + x.shape[1:]),
<33> batch,
<34> )
<35> batch = shard(batch)
<36> yield batch
<37>
<38> def _dataloader_datasets_streaming(
<39> dataset: Dataset,
<40> split: str,
<41> per_</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(
self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
):
# offset: 1
gradient_accumulation_steps: int,
epoch: int,
):
keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"]
batch = {k: [] for k in keys}
first_loop = True # stop after one loop in some cases
while (self.multi_hosts and split == "train") or first_loop:
# in multi-host, we run forever (no epoch) as hosts need to stop
# at the same time and training data may not be split equally
# For validation data we put the entire set on each host as we could lose
# too many samples on pods
if epoch is not None:
# reshuffle training data at each epoch (not applicable with validation set)
dataset.set_epoch(epoch)
epoch += 1
for item in dataset:
for k, v in item.items():
batch[k].append(v)
# batch = 5, devices = 8, accumulation = 2 / batch_size = 5 x 8
# (40, 3, 3) -> shard 8 x (5, 3, 3)
# (16, 5, 3, 3) -> shard 8 x (2, 5, 3, 3)
if len(batch[keys[0]]) == per_device_batch_size * num_devices * (
gradient_accumulation_steps
if gradient_accumulation_steps is not None
else 1
):
batch = {k: jnp.array(v) for k, v in batch.items()}
if gradient_accumulation_steps is not None:
batch = jax.tree_map(
lambda x: x.reshape(
(-1, per_device_batch_size) + x.shape[1:]
),
batch,
)
batch = shard(batch)
</s>
===========below chunk 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(
self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
):
# offset: 2
<s>size) + x.shape[1:]
),
batch,
)
batch = shard(batch)
yield batch
batch = {k: [] for k in keys}
first_loop = False
if split == "train":
ds = self.train_dataset
elif split == "eval":
ds = self.eval_dataset
else:
raise ValueError(f'split must be "train" or "eval", got {split}')
if self.streaming:
return _dataloader_datasets_streaming(
ds, split, per_device_batch_size, gradient_accumulation_steps, epoch
)
else:
if split == "train":
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
return _dataloader_datasets_non_streaming(
ds, per_device_batch_size, gradient_accumulation_steps, input_rng
)
===========unchanged ref 0===========
at: dalle_mini.data
Dataset(dataset_repo_or_path: str, train_file: str=None, validation_file: str=None, streaming: bool=True, use_auth_token: bool=False, text_column: str="caption", encoding_column: str="encoding", max_train_samples: int=None, max_eval_samples: int=None, preprocessing_num_workers: int=None, overwrite_cache: bool=False, do_train: bool=False, do_eval: bool=True, seed_dataset: int=None, shard_by_host: bool=False, train_dataset: Dataset=field(init=False), eval_dataset: Dataset=field(init=False), rng_dataset: jnp.ndarray=field(init=False), multi_hosts: bool=field(init=False))
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.__post_init__
self.multi_hosts = jax.process_count() > 1
===========unchanged ref 1===========
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
self.eval_dataset = dataset["validation"]
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
===========changed ref 0===========
# module: tools.train.train
class MetricsLogger:
def get_all_train_metrics(self, train_metrics, state):
"""Make a dict of training metrics to be logged"""
+ metrics = train_metrics
- metrics = unreplicate(train_metrics)
# get state parameters
state_dict = {
+ k.split("_")[-1]: getattr(state, k)
- k.split("_")[-1]: unreplicate(getattr(state, k))
for k in ["epoch", "train_time", "train_samples"]
}
# timing metrics
+ new_step = int(state.step)
- new_step = int(unreplicate(state.step))
new_time = time.perf_counter()
if new_step > self.step:
time_per_step = (new_time - self.time) / (new_step - self.step)
self.step = new_step
self.time = new_time
state_dict["time_per_step"] = time_per_step
return {**metrics, **state_dict}
|
dalle_mini.model.modeling/FlaxBartPreTrainedModel.__init__
|
Modified
|
borisdayma~dalle-mini
|
12f323d7f80887ac0be14c310630f4a26241724f
|
feat(model): clean way to load on cpu
|
<17>:<add> # init weights on CPU
<add> if load_on_cpu:
<add> init_fn = jax.jit(self.init_weights, static_argnums=(1,), backend="cpu")
<add> else:
<add> init_fn = self.init_weights
<add>
<22>:<add> init_fn = partial(init_fn, input_shape=input_shape)
<del> init_fn = partial(self.init_weights, input_shape=input_shape)
<25>:<add> random_params = init_fn(self.key, input_shape)
<del> random_params = self.init_weights(self.key, input_shape)
|
# module: dalle_mini.model.modeling
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
def __init__(
self,
config: DalleBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
abstract_init: bool = False,
+ load_on_cpu: bool = True,
**kwargs,
):
<0> module = self.module_class(config=config, dtype=dtype, **kwargs)
<1>
<2> # adapted from HuggingFace FlaxPreTrainedModel
<3> if config is None:
<4> raise ValueError("config cannot be None")
<5>
<6> if module is None:
<7> raise ValueError("module cannot be None")
<8>
<9> # Those are private to be exposed as typed property on derived classes.
<10> self._config = config
<11> self._module = module
<12>
<13> # Those are public as their type is generic to every derived classes.
<14> self.key = PRNGKey(seed)
<15> self.dtype = dtype
<16>
<17> # randomly initialized parameters
<18> if abstract_init:
<19> # init the model weights only abstractly, eval_shape will return a pytree
<20> # with the structure as weights but without any actual values, this will just contain
<21> # the shape information. Weights need to be loaded later.
<22> init_fn = partial(self.init_weights, input_shape=input_shape)
<23> random_params = jax.eval_shape(init_fn, self.key)
<24> else:
<25> random_params = self.init_weights(self.key, input_shape)
<26>
<27> # save required_params as set
<28> self._required_params = set(flatten_dict(unfreeze(random_params)).keys())
<29> self.params = random_params
<30>
|
===========unchanged ref 0===========
at: transformers.modeling_flax_utils.FlaxPreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_missing_keys = set()
at: transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel
config_class = BartConfig
base_model_prefix: str = "model"
module_class: nn.Module = None
__init__(self, config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs)
init_weights(rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: tools.train.train
def main():
# See all possible arguments by passing the --help flag to this script.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args.do_train,
do_eval=training_args.do_eval,
)
logger.info(f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax</s>
===========changed ref 1===========
# module: tools.train.train
def main():
# offset: 1
<s>f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax.device_count()}")
if training_args.assert_TPU_available:
assert (
jax.local_device_count() == 8
), "TPUs in use, please check running processes"
# Set up wandb run
if jax.process_index() == 0:
wandb.init(
entity=training_args.wandb_entity,
project=training_args.wandb_project,
job_type=training_args.wandb_job_type,
config=parser.parse_args(),
)
if training_args.resume_from_checkpoint is not None:
if jax.process_index() == 0:
artifact = wandb.run.use_artifact(training_args.resume_from_checkpoint)
else:
artifact = wandb.Api().artifact(training_args.resume_from_checkpoint)
artifact_dir = artifact.download()
# load model
model = DalleBart.from_pretrained(
artifact_dir,
dtype=getattr(jnp, model_args.dtype),
abstract_init=True,
)
# load tokenizer
tokenizer = DalleBartTokenizer.from_pretrained(
artifact_dir,
use_fast=True,
)
else:
# Set up our new model config
if model_args.config_name:
config = DalleBartConfig.from_pretrained(model_args.config_name)
else:
config = None
# Load or create new model
if model_args.model_name_or_path:
model = DalleBart.from_pretrained(
model_args.model_name_or_path,
</s>
===========changed ref 2===========
# module: tools.train.train
def main():
# offset: 2
<s>=config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
abstract_init=True,
)
else:
model = DalleBart(
config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
)
# Load tokenizer
if model_args.tokenizer_name is not None:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, use_fast=True
)
else:
tokenizer = DalleBartTokenizer.from_pretrained(
model_args.model_name_or_path,
use_fast=True,
)
# Preprocessing the datasets.
# We need to normalize and tokenize inputs and targets.
dataset.preprocess(
tokenizer=tokenizer,
decoder_start_token_id=model.config.decoder_start_token_id,
normalize_text=model.config.normalize_text,
max_length=model.config.max_text_length,
)
# Initialize our training
rng = jax.random.PRNGKey(training_args.seed_model)
rng, dropout_rng = jax.random.split(rng)
# Store some constant
num_epochs = training_args.num_train_epochs
# batch size per node
train_batch_size = (
training_args.per_device_train_batch_size * jax.local_device_count()
)
batch_size_per_node = train_batch_size * training_args.gradient_accumulation_steps
batch_size_per_step = batch_size_per_node * jax.process_count()
eval_batch_size = (
training_args.per_device_eval_batch</s>
|
tools.train.distributed_shampoo/matrix_inverse_pth_root
|
Modified
|
borisdayma~dalle-mini
|
8a9e367d3653df65b41e872ca752ced50c671c9c
|
feat: update distributed_shampoo + fix None spec
|
<25>:<add>
<add> assert matrix.shape[0] == matrix.shape[1]
|
# module: tools.train.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
<0> """Computes `matrix^(-1/p)`, where `p` is a positive integer.
<1>
<2> This function uses the Coupled newton iterations algorithm for
<3> the computation of a matrix's inverse pth root.
<4>
<5>
<6> References:
<7> [Functions of Matrices, Theory and Computation,
<8> Nicholas J Higham, Pg 184, Eq 7.18](
<9> https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
<10>
<11> Args:
<12> matrix: the symmetric PSD matrix whose power it to be computed
<13> p: exponent, for p a positive integer.
<14> num_iters: Maximum number of iterations.
<15> ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
<16> error_tolerance: Error indicator, useful for early termination.
<17> precision: precision XLA related flag, the available options are:
<18> a) lax.Precision.DEFAULT (better step time, but not precise)
<19> b) lax.Precision.HIGH (increased precision, slower)
<20> c) lax.Precision.HIGHEST (best possible precision, slowest)
<21>
<22> Returns:
<23> matrix^(-1/p)
<24> """
<25>
<26> # We use float32 for the matrix inverse pth root.
<27> # Switch to f64 if you have hardware that supports it.
<28> matrix_size = matrix.shape[0]
<29> alpha = jnp.asarray(-1.0 / p, jnp.float32)
<30> identity = jnp.eye(matrix_size, dtype=jnp.float32)
<31> _, max_ev = power_iteration(
<32> matrix=matrix, num_iters=100,</s>
|
===========below chunk 0===========
# module: tools.train.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 1
)
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-16)
def _unrolled_mat_pow_1(mat_m):
"""Computes mat_m^1."""
return mat_m
def _unrolled_mat_pow_2(mat_m):
"""Computes mat_m^2."""
return jnp.matmul(mat_m, mat_m, precision=precision)
def _unrolled_mat_pow_4(mat_m):
"""Computes mat_m^4."""
mat_pow_2 = _unrolled_mat_pow_2(mat_m)
return jnp.matmul(mat_pow_2, mat_pow_2, precision=precision)
def _unrolled_mat_pow_8(mat_m):
"""Computes mat_m^4."""
mat_pow_4 = _unrolled_mat_pow_4(mat_m)
return jnp.matmul(mat_pow_4, mat_pow_4, precision=precision)
def mat_power(mat_m, p):
"""Computes mat_m^p, for p == 1, 2, 4 or 8.
Args:
mat_m: a square matrix
p: a positive integer
Returns:
mat_m^p
"""
# We unrolled the loop for performance reasons.
exponent = jnp.round(jnp.log2(p))
return lax.switch(
jnp.asarray(exponent, jnp.int32),
[
_unrolled_mat_pow_1,
_unrolled_mat_pow_2,
_unrolled_mat_pow_4,</s>
===========below chunk 1===========
# module: tools.train.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 2
<s>mat_pow_1,
_unrolled_mat_pow_2,
_unrolled_mat_pow_4,
_unrolled_mat_pow_8,
],
(mat_m),
)
def _iter_condition(state):
(i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, run_step) = state
error_above_threshold = jnp.logical_and(error > error_tolerance, run_step)
return jnp.logical_and(i < num_iters, error_above_threshold)
def _iter_body(state):
(i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state
mat_m_i = (1 - alpha) * identity + alpha * mat_m
new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision)
new_error = jnp.max(jnp.abs(new_mat_m - identity))
# sometimes error increases after an iteration before decreasing and
# converging. 1.2 factor is used to bound the maximal allowed increase.
return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, new_error < error * 1.2)
if matrix_size == 1:
resultant_mat_h = (matrix + ridge_</s>
===========below chunk 2===========
# module: tools.train.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 3
<s>) ** alpha
error = 0
else:
damped_matrix = matrix + ridge_epsilon * identity
z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix))
new_mat_m_0 = damped_matrix * z
new_error = jnp.max(jnp.abs(new_mat_m_0 - identity))
new_mat_h_0 = identity * jnp.power(z, 1.0 / p)
init_state = tuple([0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True])
_, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop(
_iter_condition, _iter_body, init_state
)
error = jnp.max(jnp.abs(mat_m - identity))
is_converged = jnp.asarray(convergence, old_mat_h.dtype)
resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h
resultant_mat_h = jnp.asarray(resultant_mat_h, matrix.dtype)
return resultant_mat_h, error
===========unchanged ref 0===========
at: numpy.ndarray
__hash__: ClassVar[None]
astype(dtype: DTypeLike, order: _OrderKACF=..., casting: _CastingKind=..., subok: bool=..., copy: bool | _CopyMode=...) -> NDArray[Any]
astype(dtype: _DTypeLike[_ScalarType], order: _OrderKACF=..., casting: _CastingKind=..., subok: bool=..., copy: bool | _CopyMode=...) -> NDArray[_ScalarType]
at: numpy.random.mtrand
RandomState(seed: None | _ArrayLikeInt_co | BitGenerator=...)
at: numpy.random.mtrand.RandomState
_bit_generator: BitGenerator
uniform(low: _ArrayLikeFloat_co=..., high: _ArrayLikeFloat_co=..., size: None | _ShapeLike=...) -> ndarray[Any, dtype[float64]]
uniform(low: float=..., high: float=..., size: None=...) -> float
at: tools.train.distributed_shampoo
power_iteration(matrix, num_iters=100, error_tolerance=1e-6, precision=lax.Precision.HIGHEST)
at: tools.train.distributed_shampoo.power_iteration
matrix_size = matrix.shape[-1]
_iter_condition(state)
_iter_body(state)
v_0 = (
np.random.RandomState(1729).uniform(-1.0, 1.0, matrix_size).astype(matrix.dtype)
)
init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True])
_, v_out, s_out, _, _ = lax.while_loop(_iter_condition, _iter_body, init_state)
_, v_out, s_out, _, _ = lax.while_loop(_iter_condition, _iter_body, init_state)
===========unchanged ref 1===========
v_out = v_out / jnp.linalg.norm(v_out)
_, v_out, s_out, _, _ = lax.while_loop(_iter_condition, _iter_body, init_state)
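
A minimal standalone sketch (numpy only, not part of the repository) of the quantity the routine above approximates — matrix^(-1/p) for a symmetric PSD matrix — checked through an eigen-decomposition for p = 2:

    import numpy as np

    def inverse_pth_root_reference(matrix, p):
        # eigh is valid here because the input is symmetric PSD
        eigvals, eigvecs = np.linalg.eigh(matrix)
        return (eigvecs * eigvals ** (-1.0 / p)) @ eigvecs.T

    a = np.random.RandomState(0).normal(size=(4, 4))
    spd = a @ a.T + 1e-3 * np.eye(4)            # symmetric positive definite
    root = inverse_pth_root_reference(spd, p=2)
    # root @ root should recover the inverse of spd
    print(np.allclose(root @ root, np.linalg.inv(spd), atol=1e-6))  # True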
|
tools.train.distributed_shampoo/_convert_to_parameter_stats
|
Modified
|
borisdayma~dalle-mini
|
8a9e367d3653df65b41e872ca752ced50c671c9c
|
feat: update distributed_shampoo + fix None spec
|
<16>:<add> local_stat.training_metrics,
|
# module: tools.train.distributed_shampoo
def _convert_to_parameter_stats(global_stats, local_stat):
<0> """Creates parameter stats from sharded stats."""
<1> index_start = int(local_stat.index_start)
<2> index_end = int(len(local_stat.sizes)) + index_start
<3> statistics = global_stats.statistics[index_start:index_end, :, :]
<4> preconditioners = global_stats.preconditioners[index_start:index_end, :, :]
<5> new_statistics = []
<6> new_preconditioners = []
<7> for i, size in enumerate(local_stat.sizes):
<8> new_statistics.append(statistics[i][:size, :size])
<9> new_preconditioners.append(preconditioners[i][:size, :size])
<10> return ParameterStats(
<11> local_stat.diagonal_statistics,
<12> new_statistics,
<13> new_preconditioners,
<14> local_stat.diagonal_momentum,
<15> local_stat.momentum,
<16> )
<17>
|
===========unchanged ref 0===========
at: tools.train.distributed_shampoo.BlockPartitioner
partition(tensor)
at: tools.train.distributed_shampoo.Preconditioner
exponent_for_preconditioner(self)
preconditioned_grad(self, grad, preconditioners)
at: tools.train.distributed_shampoo.Preconditioner.__init__
self._transformed_shape = param.shape
self._transformed_shape = merge_small_dims(self._original_shape, block_size)
self._partitioner = BlockPartitioner(reshaped_param, block_size)
at: tools.train.distributed_shampoo.Preconditioner.preconditioned_grad
reshaped_grad = jnp.reshape(grad, self._transformed_shape)
partitioned_grads = self._partitioner.partition(reshaped_grad)
===========changed ref 0===========
# module: tools.train.distributed_shampoo
+ class InitFnState(NamedTuple):
+ init_fn: Any
+ pspec_fn: Any
+ shape_and_dtype_fn: Any
+
===========changed ref 1===========
# module: tools.train.distributed_shampoo
+ def init_training_metrics_pspec(num_statistics):
+ if num_statistics:
+ return TrainingMetrics(pjit.PartitionSpec())
+ else:
+ return TrainingMetrics(None)
+
===========changed ref 2===========
# module: tools.train.distributed_shampoo
+ def init_training_metrics_shapes(num_statistics):
+ if num_statistics:
+ return TrainingMetrics([[num_statistics], jnp.float32])
+ else:
+ return TrainingMetrics([None, jnp.float32])
+
===========changed ref 3===========
# module: tools.train.distributed_shampoo
+ def init_training_metrics(num_statistics):
+ if num_statistics:
+ return TrainingMetrics(jnp.zeros([num_statistics], jnp.float32))
+ else:
+ return TrainingMetrics([])
+
===========changed ref 4===========
# module: tools.train.distributed_shampoo
# For training extremely large model; We keep a global state with a concatenated
# statistics and preconditioner states for all vars. This is so that we can
# annotate the leading axis to be sharded to save memory at the cost of
# communication.
@struct.dataclass
class GlobalShardedParameterStats:
statistics: chex.Array # Statistics
preconditioners: chex.Array # Preconditioners
+ exponents: chex.Array # exponents
===========changed ref 5===========
# module: tools.train.distributed_shampoo
+ @struct.dataclass
+ class TrainingMetrics:
+ inverse_pth_root_errors: chex.Array # Error for inverse-pth roots.
+
===========changed ref 6===========
# module: tools.train.distributed_shampoo
# These are per-parameter local states; All statistics here mirror the parameter
# Thus the sharding is copied over from the param specification.
@struct.dataclass
class LocalShardedParameterStats:
"""State associated to each parameter of the model being trained."""
diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner
diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner
momentum: QuantizedValue # Momentum for the shampoo preconditioner
+ training_metrics: TrainingMetrics # Metrics (optional for training).
index_start: np.int32 = struct.field(
pytree_node=False
) # Index into global statistics array
sizes: Any = struct.field(pytree_node=False) # Sizes of the statistics.
===========changed ref 7===========
# module: tools.train.distributed_shampoo
+ # TODO(rohananil): Add more important metrics to track during training.
+
+
# Per parameter optimizer state used in data-parallel training.
class ParameterStats(NamedTuple):
"""State associated to each parameter of the model being trained."""
diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner
statistics: List[Any] # Statistics (QuantizedValue, chex.Array)
preconditioners: List[Any] # Preconditioners (QuantizedValue, chex.Array)
diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner
momentum: QuantizedValue # Momentum for the shampoo preconditioner
+ training_metrics: TrainingMetrics # Metrics (optional for training).
===========changed ref 8===========
# module: tools.train.distributed_shampoo
- """File copied from https://github.com/google-research/google-research/edit/master/scalable_shampoo/optax/distributed_shampoo.py"""
-
# coding=utf-8
+ # Copyright 2022 The Google Research Authors.
- # Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# An implementation of distributed Shampoo optimizer from:
#
# Scalable Second Order Optimization for Deep Learning
# Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer
# Preprint Paper: https://arxiv.org/abs/2002.09018
#
# This implementation moves computation of inverse pth root back to the
# accelerator (if higher precision is available).
#
# Authors: Rohan Anil (rohananil at google dot com)
# & Vineet Gupta (vineet at google dot com)
#
"""Distributed Shampoo Implementation."""
===========changed ref 9===========
# module: tools.train.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
"""Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the Coupled newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
matrix: the symmetric PSD matrix whose power is to be computed
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise)
b) lax.Precision.HIGH (increased precision, slower)
c) lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
matrix^(-1/p)
"""
+
+ assert matrix.shape[0] == matrix.shape[1]
# We use float32 for the matrix inverse pth root.
# Switch to f64 if you have hardware that supports it.
matrix_size = matrix.shape[0]
alpha = jnp.asarray(-1.0 / p, jnp.float32)
identity = jnp.eye(matrix_size, dtype=jnp.float32)
_, max_ev = power_iteration(
matrix=matrix, num_iters=100, error_tolerance=1e-6, precision=precision
</s>
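
A small sketch (hypothetical shapes, numpy in place of jnp) of the slicing convention used in _convert_to_parameter_stats: statistics are stored zero-padded to a common block size in one global array, and (index_start, sizes) recovers the original square blocks:

    import numpy as np

    block_size = 4
    global_statistics = np.zeros((3, block_size, block_size), dtype=np.float32)
    index_start, sizes = 1, [2, 3]   # this parameter owns statistics 1 and 2

    local_statistics = [
        global_statistics[index_start + i][:size, :size]
        for i, size in enumerate(sizes)
    ]
    print([s.shape for s in local_statistics])  # [(2, 2), (3, 3)]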
|
tools.train.distributed_shampoo/_convert_from_parameter_stats
|
Modified
|
borisdayma~dalle-mini
|
8a9e367d3653df65b41e872ca752ced50c671c9c
|
feat: update distributed_shampoo + fix None spec
|
<5>:<add> parameter_stats.training_metrics,
|
# module: tools.train.distributed_shampoo
def _convert_from_parameter_stats(parameter_stats, local_stats):
<0> """Creates sharded stats from paramter stats."""
<1> return LocalShardedParameterStats(
<2> parameter_stats.diagonal_statistics,
<3> parameter_stats.diagonal_momentum,
<4> parameter_stats.momentum,
<5> local_stats.index_start,
<6> local_stats.sizes,
<7> )
<8>
|
===========changed ref 0===========
# module: tools.train.distributed_shampoo
def _convert_to_parameter_stats(global_stats, local_stat):
"""Creates parameter stats from sharded stats."""
index_start = int(local_stat.index_start)
index_end = int(len(local_stat.sizes)) + index_start
statistics = global_stats.statistics[index_start:index_end, :, :]
preconditioners = global_stats.preconditioners[index_start:index_end, :, :]
new_statistics = []
new_preconditioners = []
for i, size in enumerate(local_stat.sizes):
new_statistics.append(statistics[i][:size, :size])
new_preconditioners.append(preconditioners[i][:size, :size])
return ParameterStats(
local_stat.diagonal_statistics,
new_statistics,
new_preconditioners,
local_stat.diagonal_momentum,
local_stat.momentum,
+ local_stat.training_metrics,
)
===========changed ref 1===========
# module: tools.train.distributed_shampoo
+ class InitFnState(NamedTuple):
+ init_fn: Any
+ pspec_fn: Any
+ shape_and_dtype_fn: Any
+
===========changed ref 2===========
# module: tools.train.distributed_shampoo
+ def init_training_metrics_pspec(num_statistics):
+ if num_statistics:
+ return TrainingMetrics(pjit.PartitionSpec())
+ else:
+ return TrainingMetrics(None)
+
===========changed ref 3===========
# module: tools.train.distributed_shampoo
+ def init_training_metrics_shapes(num_statistics):
+ if num_statistics:
+ return TrainingMetrics([[num_statistics], jnp.float32])
+ else:
+ return TrainingMetrics([None, jnp.float32])
+
===========changed ref 4===========
# module: tools.train.distributed_shampoo
+ def init_training_metrics(num_statistics):
+ if num_statistics:
+ return TrainingMetrics(jnp.zeros([num_statistics], jnp.float32))
+ else:
+ return TrainingMetrics([])
+
===========changed ref 5===========
# module: tools.train.distributed_shampoo
# For training extremely large model; We keep a global state with a concatenated
# statistics and preconditioner states for all vars. This is so that we can
# annotate the leading axis to be sharded to save memory at the cost of
# communication.
@struct.dataclass
class GlobalShardedParameterStats:
statistics: chex.Array # Statistics
preconditioners: chex.Array # Preconditioners
+ exponents: chex.Array # exponents
===========changed ref 6===========
# module: tools.train.distributed_shampoo
+ @struct.dataclass
+ class TrainingMetrics:
+ inverse_pth_root_errors: chex.Array # Error for inverse-pth roots.
+
===========changed ref 7===========
# module: tools.train.distributed_shampoo
# These are per-parameter local states; All statistics here mirror the parameter
# Thus the sharding is copied over from the param specification.
@struct.dataclass
class LocalShardedParameterStats:
"""State associated to each parameter of the model being trained."""
diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner
diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner
momentum: QuantizedValue # Momentum for the shampoo preconditioner
+ training_metrics: TrainingMetrics # Metrics (optional for training).
index_start: np.int32 = struct.field(
pytree_node=False
) # Index into global statistics array
sizes: Any = struct.field(pytree_node=False) # Sizes of the statistics.
===========changed ref 8===========
# module: tools.train.distributed_shampoo
+ # TODO(rohananil): Add more important metrics to track during training.
+
+
# Per parameter optimizer state used in data-parallel training.
class ParameterStats(NamedTuple):
"""State associated to each parameter of the model being trained."""
diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner
statistics: List[Any] # Statistics (QuantizedValue, chex.Array)
preconditioners: List[Any] # Preconditioners (QuantizedValue, chex.Array)
diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner
momentum: QuantizedValue # Momentum for the shampoo preconditioner
+ training_metrics: TrainingMetrics # Metrics (optional for training).
===========changed ref 9===========
# module: tools.train.distributed_shampoo
- """File copied from https://github.com/google-research/google-research/edit/master/scalable_shampoo/optax/distributed_shampoo.py"""
-
# coding=utf-8
+ # Copyright 2022 The Google Research Authors.
- # Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# An implementation of distributed Shampoo optimizer from:
#
# Scalable Second Order Optimization for Deep Learning
# Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer
# Preprint Paper: https://arxiv.org/abs/2002.09018
#
# This implementation moves computation of inverse pth root back to the
# accelerator (if higher precision is available).
#
# Authors: Rohan Anil (rohananil at google dot com)
# & Vineet Gupta (vineet at google dot com)
#
"""Distributed Shampoo Implementation."""
|
dalle_mini.data/Dataset.dataloader
|
Modified
|
borisdayma~dalle-mini
|
b7c745860644e3a5e57a5b8c17ead787ee2d9778
|
fix(train): consider correct batch size
|
<1>:<add> total_batch_size = per_device_batch_size * num_devices
<add> if gradient_accumulation_steps is not None:
<add> total_batch_size *= gradient_accumulation_steps
<4>:<del> per_device_batch_size: int,
<5>:<del> gradient_accumulation_steps: int,
<12>:<del> batch_size = (
<13>:<del> per_device_batch_size * num_devices * gradient_accumulation_steps
<14>:<del> )
<15>:<add> steps_per_epoch = len(dataset) // total_batch_size
<del> steps_per_epoch = len(dataset) // batch_size
<23>:<add> : steps_per_epoch * total_batch_size
<del> : steps_per_epoch * batch_size
<25>:<add> batch_idx = batch_idx.reshape((steps_per_epoch, total_batch_size))
<del> batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
<32>:<add> lambda x: x.reshape(
<add> (gradient_accumulation_steps, -1) + x.shape[1:]
<add> ),
<del> lambda x: x.reshape((-1, per_device_batch_size) + x.shape[1:]),
<39>:<del> split: str,
<40>:<del> per_device_batch_size: int,
|
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(
self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
):
<0> num_devices = jax.local_device_count()
<1>
<2> def _dataloader_datasets_non_streaming(
<3> dataset: Dataset,
<4> per_device_batch_size: int,
<5> gradient_accumulation_steps: int,
<6> rng: jax.random.PRNGKey = None,
<7> ):
<8> """
<9> Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
<10> Shuffle batches if rng is set.
<11> """
<12> batch_size = (
<13> per_device_batch_size * num_devices * gradient_accumulation_steps
<14> )
<15> steps_per_epoch = len(dataset) // batch_size
<16>
<17> if rng is not None:
<18> batch_idx = jax.random.permutation(rng, len(dataset))
<19> else:
<20> batch_idx = jnp.arange(len(dataset))
<21>
<22> batch_idx = batch_idx[
<23> : steps_per_epoch * batch_size
<24> ] # Skip incomplete batch.
<25> batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
<26>
<27> for idx in batch_idx:
<28> batch = dataset[idx]
<29> batch = {k: jnp.array(v) for k, v in batch.items()}
<30> if gradient_accumulation_steps is not None:
<31> batch = jax.tree_map(
<32> lambda x: x.reshape((-1, per_device_batch_size) + x.shape[1:]),
<33> batch,
<34> )
<35> yield batch
<36>
<37> def _dataloader_datasets_streaming(
<38> dataset: Dataset,
<39> split: str,
<40> per_device_batch_size: int,
</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(
self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
):
# offset: 1
epoch: int,
):
keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"]
batch = {k: [] for k in keys}
first_loop = True # stop after one loop in some cases
while (self.multi_hosts and split == "train") or first_loop:
# in multi-host, we run forever (no epoch) as hosts need to stop
# at the same time and training data may not be split equally
# For validation data we put the entire set on each host as we could lose
# too many samples on pods
if epoch is not None:
# reshuffle training data at each epoch (not applicable with validation set)
dataset.set_epoch(epoch)
epoch += 1
for item in dataset:
for k, v in item.items():
batch[k].append(v)
# batch = 5, devices = 8, accumulation = 2 / batch_size = 5 x 8
# (40, 3, 3) -> shard 8 x (5, 3, 3)
# (16, 5, 3, 3) -> shard 8 x (2, 5, 3, 3)
if len(batch[keys[0]]) == per_device_batch_size * num_devices * (
gradient_accumulation_steps
if gradient_accumulation_steps is not None
else 1
):
batch = {k: jnp.array(v) for k, v in batch.items()}
if gradient_accumulation_steps is not None:
batch = jax.tree_map(
lambda x: x.reshape(
(-1, per_device_batch_size) + x.shape[1:]
),
batch,
)
yield batch
batch = {k: [] for k in keys}
first_loop</s>
===========below chunk 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(
self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
):
# offset: 2
<s>,
batch,
)
yield batch
batch = {k: [] for k in keys}
first_loop = False
if split == "train":
ds = self.train_dataset
elif split == "eval":
ds = self.eval_dataset
else:
raise ValueError(f'split must be "train" or "eval", got {split}')
if self.streaming:
return _dataloader_datasets_streaming(
ds, split, per_device_batch_size, gradient_accumulation_steps, epoch
)
else:
if split == "train":
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
return _dataloader_datasets_non_streaming(
ds, per_device_batch_size, gradient_accumulation_steps, input_rng
)
===========unchanged ref 0===========
at: dalle_mini.data
Dataset(dataset_repo_or_path: str, train_file: str=None, validation_file: str=None, streaming: bool=True, use_auth_token: bool=False, text_column: str="caption", encoding_column: str="encoding", max_train_samples: int=None, max_eval_samples: int=None, preprocessing_num_workers: int=None, overwrite_cache: bool=False, do_train: bool=False, do_eval: bool=True, seed_dataset: int=None, shard_by_host: bool=False, train_dataset: Dataset=field(init=False), eval_dataset: Dataset=field(init=False), rng_dataset: jnp.ndarray=field(init=False), multi_hosts: bool=field(init=False))
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.__post_init__
self.multi_hosts = jax.process_count() > 1
===========unchanged ref 1===========
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
self.eval_dataset = dataset["validation"]
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
===========changed ref 0===========
# module: tools.train.train
def main():
# See all possible arguments by passing the --help flag to this script.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args.do_train,
do_eval=training_args.do_eval,
)
logger.info(f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax</s>
|
dalle_mini.data/Dataset.dataloader
|
Modified
|
borisdayma~dalle-mini
|
f25405838c9ce3d1835819b6a006d4cd731867da
|
feat(train): improve pjit speed
|
<0>:<del> num_devices = jax.local_device_count()
<1>:<del> total_batch_size = per_device_batch_size * num_devices
<2>:<del> if gradient_accumulation_steps is not None:
<3>:<del> total_batch_size *= gradient_accumulation_steps
<4>:<del>
<13>:<add> steps_per_epoch = len(dataset) // batch_size
<del> steps_per_epoch = len(dataset) // total_batch_size
<21>:<add> : steps_per_epoch * batch_size
<del> : steps_per_epoch * total_batch_size
<23>:<add> batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
<del> batch_idx = batch_idx.reshape((steps_per_epoch, total_batch_size))
<28>:<del> if gradient_accumulation_steps is not None:
<29>:<del> batch = jax.tree_map(
<30>:<del> lambda x: x.reshape(
<31>:<del> (gradient_accumulation_steps, -1) + x.shape[1:]
<32>:<del> ),
<33>:<del> batch,
<34>:<del> )
|
# module: dalle_mini.data
@dataclass
class Dataset:
- def dataloader(
- self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
- ):
+ def dataloader(self, split, batch_size, epoch=None):
<0> num_devices = jax.local_device_count()
<1> total_batch_size = per_device_batch_size * num_devices
<2> if gradient_accumulation_steps is not None:
<3> total_batch_size *= gradient_accumulation_steps
<4>
<5> def _dataloader_datasets_non_streaming(
<6> dataset: Dataset,
<7> rng: jax.random.PRNGKey = None,
<8> ):
<9> """
<10> Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
<11> Shuffle batches if rng is set.
<12> """
<13> steps_per_epoch = len(dataset) // total_batch_size
<14>
<15> if rng is not None:
<16> batch_idx = jax.random.permutation(rng, len(dataset))
<17> else:
<18> batch_idx = jnp.arange(len(dataset))
<19>
<20> batch_idx = batch_idx[
<21> : steps_per_epoch * total_batch_size
<22> ] # Skip incomplete batch.
<23> batch_idx = batch_idx.reshape((steps_per_epoch, total_batch_size))
<24>
<25> for idx in batch_idx:
<26> batch = dataset[idx]
<27> batch = {k: jnp.array(v) for k, v in batch.items()}
<28> if gradient_accumulation_steps is not None:
<29> batch = jax.tree_map(
<30> lambda x: x.reshape(
<31> (gradient_accumulation_steps, -1) + x.shape[1:]
<32> ),
<33> batch,
<34> )
<35> yield batch
<36>
<37> def _dataloader_datasets_streaming(</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
- def dataloader(
- self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
- ):
+ def dataloader(self, split, batch_size, epoch=None):
# offset: 1
epoch: int,
):
keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"]
batch = {k: [] for k in keys}
first_loop = True # stop after one loop in some cases
while (self.multi_hosts and split == "train") or first_loop:
# in multi-host, we run forever (no epoch) as hosts need to stop
# at the same time and training data may not be split equally
# For validation data we put the entire set on each host as we could lose
# too many samples on pods
if epoch is not None:
# reshuffle training data at each epoch (not applicable with validation set)
dataset.set_epoch(epoch)
epoch += 1
for item in dataset:
for k, v in item.items():
batch[k].append(v)
if len(batch[keys[0]]) == total_batch_size:
batch = {k: jnp.array(v) for k, v in batch.items()}
if gradient_accumulation_steps is not None:
# training mode
batch = jax.tree_map(
lambda x: x.reshape(
(gradient_accumulation_steps, -1) + x.shape[1:]
),
batch,
)
yield batch
batch = {k: [] for k in keys}
first_loop = False
if split == "train":
ds = self.train_dataset
elif split == "eval":
ds = self.eval_dataset
else:
raise ValueError(f'split must be "train" or "eval", got {split}')
if self.streaming:
return _d</s>
===========below chunk 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
- def dataloader(
- self, split, per_device_batch_size, gradient_accumulation_steps=None, epoch=None
- ):
+ def dataloader(self, split, batch_size, epoch=None):
# offset: 2
<s>'split must be "train" or "eval", got {split}')
if self.streaming:
return _dataloader_datasets_streaming(ds, epoch)
else:
if split == "train":
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
return _dataloader_datasets_non_streaming(ds, input_rng)
===========unchanged ref 0===========
at: dalle_mini.data
Dataset(dataset_repo_or_path: str, train_file: str=None, validation_file: str=None, streaming: bool=True, use_auth_token: bool=False, text_column: str="caption", encoding_column: str="encoding", max_train_samples: int=None, max_eval_samples: int=None, preprocessing_num_workers: int=None, overwrite_cache: bool=False, do_train: bool=False, do_eval: bool=True, seed_dataset: int=None, shard_by_host: bool=False, train_dataset: Dataset=field(init=False), eval_dataset: Dataset=field(init=False), rng_dataset: jnp.ndarray=field(init=False), multi_hosts: bool=field(init=False))
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.__post_init__
self.multi_hosts = jax.process_count() > 1
===========unchanged ref 1===========
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
self.eval_dataset = dataset["validation"]
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
at: numpy.core._multiarray_umath
array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, like=None, /)
===========changed ref 0===========
# module: tools.train.train
def main():
# See all possible arguments by passing the --help flag to this script.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args.do_train,
do_eval=training_args.do_eval,
)
logger.info(f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax</s>
|
tools.train.train/MetricsLogger.__init__
|
Modified
|
borisdayma~dalle-mini
|
14abe8c827eb85ee4081efe0749eba2a6ee9d8b4
|
feat(train): another 25% faster
|
<0>:<add> self.step = int(state.step)
<del> self.step = state.step
|
# module: tools.train.train
class MetricsLogger:
def __init__(self, state):
<0> self.step = state.step
<1> self.time = time.perf_counter()
<2>
|
===========unchanged ref 0===========
at: time
perf_counter() -> float
at: tools.train.train.MetricsLogger.get_all_train_metrics
self.step = new_step
self.time = new_time
|
tools.train.train/ModelArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
225b6ff17f1736b6bf44580322a0c7e7b340003b
|
fix(train): opt_state_shape for distributed_shampoo
|
<7>:<del> raise ValueError("Need a dataset repository or path.")
|
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
<0> if self.restore_state:
<1> assert (
<2> "/model-" in self.model_name_or_path
<3> ), "Restoring state only available with W&B artifact reference"
<4> self.state_artifact = self.model_name_or_path.replace(
<5> "/model-", "/state-", 1
<6> )
<7> raise ValueError("Need a dataset repository or path.")
<8>
|
===========unchanged ref 0===========
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state associated with a wandb checkpoint."
},
)
state_artifact: str = field(init=False)
|
tools.train.train/MetricsLogger.__init__
|
Modified
|
borisdayma~dalle-mini
|
5f28cd252597cb4b5c8ac16f7b47511f1beb8d35
|
feat(train): more custom x-axis
|
<0>:<add> self.step = step
<del> self.step = int(state.step)
<2>:<add> self.state_dict = {}
|
# module: tools.train.train
class MetricsLogger:
+ def __init__(self, step):
- def __init__(self, state):
<0> self.step = int(state.step)
<1> self.time = time.perf_counter()
<2>
|
===========unchanged ref 0===========
at: time
perf_counter() -> float
at: tools.train.train.MetricsLogger.update_state_metrics
self.step = new_step
self.time = new_time
|
tools.train.train/MetricsLogger.log
|
Modified
|
borisdayma~dalle-mini
|
5f28cd252597cb4b5c8ac16f7b47511f1beb8d35
|
feat(train): more custom x-axis
|
<5>:<del> if step is not None:
<6>:<del> log_metrics["train/step"] = step
<7>:<del> wandb.log(log_metrics)
<8>:<add> wandb.log({**log_metrics, **self.state_dict})
|
# module: tools.train.train
class MetricsLogger:
- @staticmethod
+ def log(self, metrics, prefix=None):
- def log(metrics, step=None, prefix=None):
<0> if jax.process_index() == 0:
<1> log_metrics = {
<2> f"{prefix}/{k}" if prefix is not None else k: v
<3> for k, v in metrics.items()
<4> }
<5> if step is not None:
<6> log_metrics["train/step"] = step
<7> wandb.log(log_metrics)
<8>
|
===========unchanged ref 0===========
at: tools.train.train.MetricsLogger.__init__
self.state_dict = {}
at: tools.train.train.MetricsLogger.update_state_metrics
self.state_dict = {
f'train/{k.split("_")[-1]}': getattr(state, k)
for k in ["step", "epoch", "train_time", "train_samples"]
}
at: transformers.hf_argparser
HfArgumentParser(dataclass_types: Union[DataClassType, Iterable[DataClassType]], *, prog: Optional[str]=..., usage: Optional[str]=..., description: Optional[str]=..., epilog: Optional[str]=..., parents: Sequence[ArgumentParser]=..., formatter_class: _FormatterClass=..., prefix_chars: str=..., fromfile_prefix_chars: Optional[str]=..., argument_default: Any=..., conflict_handler: str=..., add_help: bool=..., allow_abbrev: bool=...)
at: wandb
log = _preinit.PreInitCallable("wandb.log", wandb_sdk.wandb_run.Run.log)
===========changed ref 0===========
# module: tools.train.train
class MetricsLogger:
+ def __init__(self, step):
- def __init__(self, state):
+ self.step = step
- self.step = int(state.step)
self.time = time.perf_counter()
+ self.state_dict = {}
===========changed ref 1===========
# module: tools.train.train
class MetricsLogger:
+ def update_state_metrics(self, state):
+ """Update internal state metrics (logged at each call to be used as x-axis)"""
+ self.state_dict = {
+ f'train/{k.split("_")[-1]}': getattr(state, k)
+ for k in ["step", "epoch", "train_time", "train_samples"]
+ }
+ # timing metrics
+ new_step = int(state.step)
+ new_time = time.perf_counter()
+ if new_step > self.step:
+ time_per_step = (new_time - self.time) / (new_step - self.step)
+ self.step = new_step
+ self.time = new_time
+ self.state_dict["train/time_per_step"] = time_per_step
+
===========changed ref 2===========
# module: tools.train.train
class MetricsLogger:
- def get_all_train_metrics(self, train_metrics, state):
- """Make a dict of training metrics to be logged"""
- metrics = train_metrics
- # get state parameters
- state_dict = {
- k.split("_")[-1]: getattr(state, k)
- for k in ["epoch", "train_time", "train_samples"]
- }
- # timing metrics
- new_step = int(state.step)
- new_time = time.perf_counter()
- if new_step > self.step:
- time_per_step = (new_time - self.time) / (new_step - self.step)
- self.step = new_step
- self.time = new_time
- state_dict["time_per_step"] = time_per_step
- return {**metrics, **state_dict}
-
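
A tiny sketch (hypothetical values) of the dict merge performed in log(): prefixed metrics are combined with the state-derived x-axis values before a single wandb.log call:

    metrics = {"loss": 0.7}
    state_dict = {"train/step": 120, "train/epoch": 1, "train/samples": 30720}
    log_metrics = {f"train/{k}": v for k, v in metrics.items()}
    payload = {**log_metrics, **state_dict}
    print(payload)
    # {'train/loss': 0.7, 'train/step': 120, 'train/epoch': 1, 'train/samples': 30720}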
|
tools.train.train/ModelArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
1bb3269c4cf5276a7f81f58c8af78efc63ed0c9a
|
feat: handle model parallel
|
<1>:<add> assert self.model_name_or_path is not None and (
<del> assert (
|
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
<0> if self.restore_state:
<1> assert (
<2> "/model-" in self.model_name_or_path
<3> ), "Restoring state only available with W&B artifact reference"
<4> self.state_artifact = self.model_name_or_path.replace(
<5> "/model-", "/state-", 1
<6> )
<7>
|
===========unchanged ref 0===========
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state associated with a wandb checkpoint."
},
)
state_artifact: str = field(init=False)
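
A sketch of the artifact-name rewrite checked in __post_init__, with a hypothetical W&B artifact reference:

    model_name_or_path = "dalle-mini/dalle-mini/model-3bqt6e8e:latest"  # hypothetical
    assert "/model-" in model_name_or_path
    state_artifact = model_name_or_path.replace("/model-", "/state-", 1)
    print(state_artifact)  # dalle-mini/dalle-mini/state-3bqt6e8e:latest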
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
1bb3269c4cf5276a7f81f58c8af78efc63ed0c9a
|
feat: handle model parallel
|
<26>:<del> # remove inferred keys to prevent errors when loading config (passed as kwargs)
<27>:<del> for k in [
<28>:<del> "pad_token_id",
<29>:<del> "bos_token_id",
<30>:<del>
|
<s>1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
+ do_sample=True,
**kwargs,
):
<0> self.normalize_text = normalize_text
<1> self.encoder_vocab_size = encoder_vocab_size
<2> self.image_vocab_size = image_vocab_size
<3> self.image_length = image_length
<4> self.max_text_length = max_text_length
<5> self.d_model = d_model
<6> self.encoder_ffn_dim = encoder_ffn_dim
<7> self.encoder_layers = encoder_layers
<8> self.encoder_attention_heads = encoder_attention_heads
<9> self.decoder_ffn_dim = decoder_ffn_dim
<10> self.decoder_layers = decoder_layers
<11> self.decoder_attention_heads = decoder_attention_heads
<12> self.dropout = dropout
<13> self.attention_dropout = attention_dropout
<14> self.activation_dropout = activation_dropout
<15> self.activation_function = activation_function
<16> self.init_std = init_std
<17> self.encoder_layerdrop = encoder_layerdrop
<18> self.decoder_layerdrop = decoder_layerdrop
<19> self.classifier_dropout = classifier_dropout
<20> self.use_cache = use_cache
<21> self.gradient_checkpointing = gradient_checkpointing
<22> self.scale_embedding = (
<23> scale_embedding # scale factor will be sqrt(d_model) if True
<24> )
<25>
<26> # remove inferred keys to prevent errors when loading config (passed as kwargs)
<27> for k in [
<28> "pad_token_id",
<29> "bos_token_id",
<30> </s>
|
===========below chunk 0===========
<s>_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
+ do_sample=True,
**kwargs,
):
# offset: 1
"decoder_start_token_id",
"min_length",
"max_length",
]:
kwargs.pop(k, None)
super().__init__(
pad_token_id=image_vocab_size
+ 1, # needed to avoid errors during generation (converted to jnp.array)
bos_token_id=image_vocab_size + 1, # set to unreachable values
eos_token_id=image_vocab_size + 1,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=image_vocab_size, # BOS appended to vocab
forced_eos_token_id=forced_eos_token_id,
tie_word_embeddings=tie_word_embeddings,
min_length=image_length + 1,
max_length=image_length + 1,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
===========changed ref 0===========
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
if self.restore_state:
+ assert self.model_name_or_path is not None and (
- assert (
"/model-" in self.model_name_or_path
), "Restoring state only available with W&B artifact reference"
self.state_artifact = self.model_name_or_path.replace(
"/model-", "/state-", 1
)
===========changed ref 1===========
# module: tools.train.train
@dataclass
class TrainingArguments:
"""
Arguments pertaining to training parameters.
"""
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
+ default=8,
+ metadata={"help": "Batch size per data parallel device for training."},
- default=8, metadata={"help": "Batch size per GPU/TPU/CPU for training."}
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
+ "help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
- "help": "Batch size per GPU/TPU/CPU for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1</s>
===========changed ref 2===========
# module: tools.train.train
@dataclass
class TrainingArguments:
# offset: 1
<s> "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
start_preconditioning_step: int = field(
default=100,
metadata={"help": "Number of steps before starting to update preconditioner."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
warmup_steps: int = field(
default=</s>
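
A sketch of the token-id and length defaults introduced above, using hypothetical sizes (image_vocab_size=16384, image_length=256) and no user overrides:

    image_vocab_size, image_length = 16384, 256
    kwargs = {}  # nothing overridden

    decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
    bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
    pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
    eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
    min_length = kwargs.pop("min_length", image_length + 1)
    max_length = kwargs.pop("max_length", image_length + 1)
    print(decoder_start_token_id, bos_token_id, min_length, max_length)  # 16384 16384 257 257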
|
dalle_mini.model.modeling/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
1bb3269c4cf5276a7f81f58c8af78efc63ed0c9a
|
feat: handle model parallel
|
<23>:<add> jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
<del> jnp.ones((1, self.config.image_length + 1), dtype="bool"), dtype="bool"
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> if self.head_dim * self.num_heads != self.embed_dim:
<2> raise ValueError(
<3> f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
<4> f" and `num_heads`: {self.num_heads})."
<5> )
<6>
<7> dense = partial(
<8> nn.Dense,
<9> self.embed_dim,
<10> use_bias=self.bias,
<11> dtype=self.dtype,
<12> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<13> )
<14>
<15> self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
<16> self.out_proj = dense()
<17>
<18> self.dropout_layer = nn.Dropout(rate=self.dropout)
<19>
<20> if self.causal:
<21> # used only in decoder
<22> self.causal_mask = make_causal_mask(
<23> jnp.ones((1, self.config.image_length + 1), dtype="bool"), dtype="bool"
<24> )
<25>
|
===========unchanged ref 0===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self) -> None
===========changed ref 0===========
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
if self.restore_state:
+ assert self.model_name_or_path is not None and (
- assert (
"/model-" in self.model_name_or_path
), "Restoring state only available with W&B artifact reference"
self.state_artifact = self.model_name_or_path.replace(
"/model-", "/state-", 1
)
===========changed ref 1===========
<s>1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
+ do_sample=True,
**kwargs,
):
self.normalize_text = normalize_text
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
- # remove inferred keys to prevent errors when loading config (passed as kwargs)
- for k in [
- "pad_token_id",
- "bos_token_id",
- "eos_token_id",
- "decoder_start_token_id",
- "min_</s>
===========changed ref 2===========
<s>_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
+ do_sample=True,
**kwargs,
):
# offset: 1
<s>",
- "eos_token_id",
- "decoder_start_token_id",
- "min_length",
- "max_length",
- ]:
- kwargs.pop(k, None)
+ # special token id's are appended to vocab if not provided
+ decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
+ bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
+ pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
+ eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
+
+ # we generate to image_length + 1 (for bos) by default
+ min_length = kwargs.pop("min_length", image_length + 1)
+ max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
- pad_token_id=image_vocab_size
- + 1, # needed to avoid errors during generation (converted to jnp.array)
- bos_token_id=image_vocab_size + 1, # set to unreachable values
- eos_token_id=image_vocab_size + 1,
+ # args required in parent class
is_encoder_decoder=is_encoder_decoder,</s>
===========changed ref 3===========
<s>_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
+ do_sample=True,
**kwargs,
):
# offset: 2
<s> tie_word_embeddings=tie_word_embeddings,
- decoder_start_token_id=image_vocab_size, # BOS appended to vocab
forced_eos_token_id=forced_eos_token_id,
+ decoder_start_token_id=decoder_start_token_id,
+ bos_token_id=bos_token_id,
+ pad_token_id=pad_token_id,
+ eos_token_id=eos_token_id,
- tie_word_embeddings=tie_word_embeddings,
+ min_length=min_length,
- min_length=image_length + 1,
+ max_length=max_length,
- max_length=image_length + 1,
+ do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
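
A sketch of the causal mask built in setup(), using flax's make_causal_mask with a tiny stand-in for image_length:

    import jax.numpy as jnp
    from flax.linen.attention import make_causal_mask

    image_length = 4  # tiny stand-in for the real sequence length
    mask = make_causal_mask(jnp.ones((1, image_length), dtype="bool"), dtype="bool")
    print(mask.shape)             # (1, 1, 4, 4)
    print(mask[0, 0].astype(int))
    # [[1 0 0 0]
    #  [1 1 0 0]
    #  [1 1 1 0]
    #  [1 1 1 1]]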
|
dalle_mini.model.modeling/FlaxBartDecoder.setup
|
Modified
|
borisdayma~dalle-mini
|
1bb3269c4cf5276a7f81f58c8af78efc63ed0c9a
|
feat: handle model parallel
|
<12>:<add> self.config.image_length + self.offset, # image length for BOS
<del> self.config.image_length + 1 + self.offset, # image length + 1 for BOS
|
# module: dalle_mini.model.modeling
class FlaxBartDecoder(FlaxBartDecoder):
def setup(self):
<0> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<1>
<2> embed_dim = self.config.d_model
<3> self.padding_idx = self.config.pad_token_id
<4> self.embed_scale = (
<5> math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
<6> )
<7>
<8> # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
<9> # and adjust num_embeddings appropriately. Other models don't have this hack
<10> self.offset = 0
<11> self.embed_positions = nn.Embed(
<12> self.config.image_length + 1 + self.offset, # image length + 1 for BOS
<13> embed_dim,
<14> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<15> )
<16>
<17> self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
<18> self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
<19>
|
===========unchanged ref 0===========
at: math
sqrt(x: SupportsFloat, /) -> float
at: transformers.configuration_utils.PretrainedConfig.__init__
self.pad_token_id = kwargs.pop("pad_token_id", None)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.dropout = dropout
self.init_std = init_std
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
at: transformers.models.bart.modeling_flax_bart.FlaxBartDecoder
config: BartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {self.num_heads})."
)
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=self.bias,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
+ jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
- jnp.ones((1, self.config.image_length + 1), dtype="bool"), dtype="bool"
)
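The only change above is the length used for the causal mask. A short sketch, assuming flax.linen's make_causal_mask (the same helper the code calls), showing the shape it produces:

import jax.numpy as jnp
from flax.linen import make_causal_mask

image_length = 4  # illustrative; the real value is config.image_length
mask = make_causal_mask(jnp.ones((1, image_length), dtype="bool"), dtype="bool")
print(mask.shape)  # (1, 1, 4, 4) -- lower-triangular, broadcastable over attention heads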
===========changed ref 1===========
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
if self.restore_state:
+ assert self.model_name_or_path is not None and (
- assert (
"/model-" in self.model_name_or_path
), "Restoring state only available with W&B artifact reference"
self.state_artifact = self.model_name_or_path.replace(
"/model-", "/state-", 1
)
===========changed ref 2===========
<s>1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
+ do_sample=True,
**kwargs,
):
self.normalize_text = normalize_text
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
- # remove inferred keys to prevent errors when loading config (passed as kwargs)
- for k in [
- "pad_token_id",
- "bos_token_id",
- "eos_token_id",
- "decoder_start_token_id",
- "min_</s>
===========changed ref 3===========
<s>_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
gradient_checkpointing=False,
use_cache=True,
is_encoder_decoder=True,
forced_eos_token_id=None,
tie_word_embeddings=False, # different modalities and sizes
+ do_sample=True,
**kwargs,
):
# offset: 1
<s>",
- "eos_token_id",
- "decoder_start_token_id",
- "min_length",
- "max_length",
- ]:
- kwargs.pop(k, None)
+ # special token id's are appended to vocab if not provided
+ decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
+ bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
+ pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
+ eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
+
+ # we generate to image_length + 1 (for bos) by default
+ min_length = kwargs.pop("min_length", image_length + 1)
+ max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
- pad_token_id=image_vocab_size
- + 1, # needed to avoid errors during generation (converted to jnp.array)
- bos_token_id=image_vocab_size + 1, # set to unreachable values
- eos_token_id=image_vocab_size + 1,
+ # args required in parent class
is_encoder_decoder=is_encoder_decoder,</s>
|
dalle_mini.data/Dataset.preprocess
|
Modified
|
borisdayma~dalle-mini
|
1bb3269c4cf5276a7f81f58c8af78efc63ed0c9a
|
feat: handle model parallel
|
<0>:<add> # get required config variables
<add> decoder_start_token_id = config.decoder_start_token_id
<add> normalize_text = config.normalize_text
<add> max_length = config.max_text_length
<add>
|
# module: dalle_mini.data
@dataclass
class Dataset:
+ def preprocess(self, tokenizer, config):
- def preprocess(self, tokenizer, decoder_start_token_id, normalize_text, max_length):
<0> if self.streaming:
<1> # we need to shuffle early in streaming mode
<2> if hasattr(self, "train_dataset"):
<3> self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
<4> else:
<5> # prepare rng for later shuffling
<6> if self.seed_dataset is None:
<7> self.seed_dataset = np.random.get_state()[1][0]
<8> self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
<9>
<10> # normalize text
<11> if normalize_text:
<12> text_normalizer = TextNormalizer()
<13> partial_normalize_function = partial(
<14> normalize_function,
<15> text_column=self.text_column,
<16> text_normalizer=text_normalizer,
<17> )
<18> for ds in ["train_dataset", "eval_dataset"]:
<19> if hasattr(self, ds):
<20> setattr(
<21> self,
<22> ds,
<23> (
<24> getattr(self, ds).map(partial_normalize_function)
<25> if self.streaming
<26> else getattr(self, ds).map(
<27> partial_normalize_function,
<28> num_proc=self.preprocessing_num_workers,
<29> load_from_cache_file=not self.overwrite_cache,
<30> desc="Normalizing datasets",
<31> )
<32> ),
<33> )
<34>
<35> # preprocess
<36> partial_preprocess_function = partial(
<37> preprocess_function,
<38> tokenizer=tokenizer,
<39> text_column=self.text_column,
<40> encoding_column=self.encoding_column,
<41> max_length=max_length,
<42> decoder_start_token_id=decoder_start_token_id,
<43> )
<44> for</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
+ def preprocess(self, tokenizer, config):
- def preprocess(self, tokenizer, decoder_start_token_id, normalize_text, max_length):
# offset: 1
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(
partial_preprocess_function,
batched=True,
)
if self.streaming
else getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
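Both the normalization and preprocessing passes above bind their fixed arguments with functools.partial so the same callable can be handed to streaming and non-streaming map. A toy sketch of the pattern, using only the public datasets API:

from functools import partial
from datasets import Dataset

def add_prefix(examples, prefix):
    # stand-in for the real normalizing/tokenizing function
    return {"caption": [prefix + c for c in examples["caption"]]}

ds = Dataset.from_dict({"caption": ["a cat", "a dog"]})
ds = ds.map(partial(add_prefix, prefix="photo of "), batched=True)
print(ds["caption"])  # ['photo of a cat', 'photo of a dog']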
===========unchanged ref 0===========
at: dalle_mini.data
normalize_function(example, text_column, text_normalizer)
preprocess_function(examples, tokenizer, text_column, encoding_column, max_length, decoder_start_token_id)
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.__post_init__
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
at: dalle_mini.data.Dataset.dataloader
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
===========unchanged ref 1===========
at: datasets.arrow_dataset.Dataset
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=None, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, suffix_template: str="_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str]=None, desc: Optional[str]=None) -> "Dataset"
wrapper(*, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 2===========
at: datasets.dataset_dict.DatasetDict
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, desc: Optional[str]=None) -> "DatasetDict"
shuffle(seeds: Optional[Union[int, Dict[str, Optional[int]]]]=None, seed: Optional[int]=None, generators: Optional[Dict[str, np.random.Generator]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000) -> "DatasetDict"
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: numpy.random.mtrand
get_state = _rand.get_state
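When seed_dataset is unset, the code above derives one from NumPy's global RNG state and turns it into a JAX key that is split again at dataloading time. A minimal sketch of that seeding path using only public numpy/jax calls:

import numpy as np
import jax

seed = np.random.get_state()[1][0]      # first word of the MT19937 key vector
rng = jax.random.PRNGKey(int(seed))
rng, input_rng = jax.random.split(rng)  # input_rng drives shuffling for one epoch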
===========changed ref 0===========
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
if self.restore_state:
+ assert self.model_name_or_path is not None and (
- assert (
"/model-" in self.model_name_or_path
), "Restoring state only available with W&B artifact reference"
self.state_artifact = self.model_name_or_path.replace(
"/model-", "/state-", 1
)
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartDecoder(FlaxBartDecoder):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.embed_scale = (
math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
)
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
+ self.config.image_length + self.offset, # image length for BOS
- self.config.image_length + 1 + self.offset, # image length + 1 for BOS
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
|
dalle_mini.data/Dataset.preprocess
|
Modified
|
borisdayma~dalle-mini
|
0952927a241297ea6e4e242e49cb45d275f43eb1
|
feat(train) - handle multiple nodes (#130)
|
<8>:<add> self.train_dataset = self.train_dataset.shuffle(5000, self.seed_dataset)
<del> self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
|
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
<0> # get required config variables
<1> decoder_start_token_id = config.decoder_start_token_id
<2> normalize_text = config.normalize_text
<3> max_length = config.max_text_length
<4>
<5> if self.streaming:
<6> # we need to shuffle early in streaming mode
<7> if hasattr(self, "train_dataset"):
<8> self.train_dataset = self.train_dataset.shuffle(1000, self.seed_dataset)
<9> else:
<10> # prepare rng for later shuffling
<11> if self.seed_dataset is None:
<12> self.seed_dataset = np.random.get_state()[1][0]
<13> self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
<14>
<15> # normalize text
<16> if normalize_text:
<17> text_normalizer = TextNormalizer()
<18> partial_normalize_function = partial(
<19> normalize_function,
<20> text_column=self.text_column,
<21> text_normalizer=text_normalizer,
<22> )
<23> for ds in ["train_dataset", "eval_dataset"]:
<24> if hasattr(self, ds):
<25> setattr(
<26> self,
<27> ds,
<28> (
<29> getattr(self, ds).map(partial_normalize_function)
<30> if self.streaming
<31> else getattr(self, ds).map(
<32> partial_normalize_function,
<33> num_proc=self.preprocessing_num_workers,
<34> load_from_cache_file=not self.overwrite_cache,
<35> desc="Normalizing datasets",
<36> )
<37> ),
<38> )
<39>
<40> # preprocess
<41> partial_preprocess_function = partial(
<42> preprocess_function,
<43> tokenizer=tokenizer,
<44> text_column=self.text_column,
<45> encoding_column=self.encoding_column,
<46> max_length=max_</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# offset: 1
decoder_start_token_id=decoder_start_token_id,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(
partial_preprocess_function,
batched=True,
)
if self.streaming
else getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
===========unchanged ref 0===========
at: dalle_mini.data
normalize_function(example, text_column, text_normalizer)
preprocess_function(examples, tokenizer, text_column, encoding_column, max_length, decoder_start_token_id)
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.__post_init__
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
at: dalle_mini.data.Dataset.dataloader
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
===========unchanged ref 1===========
at: datasets.arrow_dataset.Dataset
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=None, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, suffix_template: str="_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str]=None, desc: Optional[str]=None) -> "Dataset"
wrapper(*, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 2===========
at: datasets.dataset_dict.DatasetDict
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, desc: Optional[str]=None) -> "DatasetDict"
shuffle(seeds: Optional[Union[int, Dict[str, Optional[int]]]]=None, seed: Optional[int]=None, generators: Optional[Dict[str, np.random.Generator]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000) -> "DatasetDict"
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: numpy.random.mtrand
get_state = _rand.get_state
===========changed ref 0===========
# module: tools.train.train
def main():
# See all possible arguments by passing the --help flag to this script.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args.do_train,
do_eval=training_args.do_eval,
)
logger.info(f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax</s>
|
dalle_mini.model.modeling/FlaxBartPreTrainedModel.__init__
|
Modified
|
borisdayma~dalle-mini
|
fa72aa7214ca2e948e61cf153e4c47baf02574f3
|
feat(modeling): simplify abstract_init
|
<19>:<add> init_fn = jax.jit(
<add> self.init_weights, static_argnames="input_shape", backend="cpu"
<del> init_fn = jax.jit(self.init_weights, static_argnums=(1,), backend="cpu")
<20>:<add> )
<28>:<del> init_fn = partial(init_fn, input_shape=input_shape)
<29>:<add> random_params = jax.eval_shape(
<del> random_params = jax.eval_shape(init_fn, self.key)
<30>:<add> init_fn, rng=self.key, input_shape=input_shape
<add> )
<31>:<add> random_params = init_fn(rng=self.key, input_shape=input_shape)
<del> random_params = init_fn(self.key, input_shape)
|
# module: dalle_mini.model.modeling
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
def __init__(
self,
config: DalleBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
abstract_init: bool = False,
load_on_cpu: bool = False,
**kwargs,
):
<0> module = self.module_class(config=config, dtype=dtype, **kwargs)
<1>
<2> # adapted from HuggingFace FlaxPreTrainedModel
<3> if config is None:
<4> raise ValueError("config cannot be None")
<5>
<6> if module is None:
<7> raise ValueError("module cannot be None")
<8>
<9> # Those are private to be exposed as typed property on derived classes.
<10> self._config = config
<11> self._module = module
<12>
<13> # Those are public as their type is generic to every derived classes.
<14> self.key = PRNGKey(seed)
<15> self.dtype = dtype
<16>
<17> # init weights on CPU
<18> if load_on_cpu:
<19> init_fn = jax.jit(self.init_weights, static_argnums=(1,), backend="cpu")
<20> else:
<21> init_fn = self.init_weights
<22>
<23> # randomly initialized parameters
<24> if abstract_init:
<25> # init the model weights only abstractly, eval_shape will return a pytree
<26> # with the structure as weights but without any actual values, this will just contain
<27> # the shape information. Weights need to be loaded later.
<28> init_fn = partial(init_fn, input_shape=input_shape)
<29> random_params = jax.eval_shape(init_fn, self.key)
<30> else:
<31> random_params = init_fn(self.key, input_shape)
<32>
<33> # save required_params as set
<34> self._required_params = set(</s>
|
===========below chunk 0===========
<s> dalle_mini.model.modeling
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
def __init__(
self,
config: DalleBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
abstract_init: bool = False,
load_on_cpu: bool = False,
**kwargs,
):
# offset: 1
self.params = random_params
===========unchanged ref 0===========
at: transformers.modeling_flax_utils.FlaxPreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_missing_keys = set()
at: transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel
config_class = BartConfig
base_model_prefix: str = "model"
module_class: nn.Module = None
__init__(self, config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs)
init_weights(rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
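abstract_init above leans on jax.eval_shape, which traces the init function and returns a pytree of ShapeDtypeStructs without allocating parameters; the shape argument is bound with partial first so it stays concrete. A standalone sketch with a toy init function:

from functools import partial
import jax

def init_fn(rng, input_shape):
    # toy stand-in for a Flax module's init_weights
    return {"kernel": jax.random.normal(rng, input_shape)}

abstract_params = jax.eval_shape(partial(init_fn, input_shape=(1, 256)), jax.random.PRNGKey(0))
print(abstract_params["kernel"].shape, abstract_params["kernel"].dtype)  # (1, 256) float32, no memory used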
|
tools.train.distributed_shampoo/matrix_inverse_pth_root
|
Modified
|
borisdayma~dalle-mini
|
5996680850895aa267284ac17449c6a9604a9087
|
feat: update distributed_shampoo
|
# module: tools.train.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
<0> """Computes `matrix^(-1/p)`, where `p` is a positive integer.
<1>
<2> This function uses the coupled Newton iteration algorithm for
<3> the computation of a matrix's inverse pth root.
<4>
<5>
<6> References:
<7> [Functions of Matrices, Theory and Computation,
<8> Nicholas J Higham, Pg 184, Eq 7.18](
<9> https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
<10>
<11> Args:
<12> matrix: the symmetric PSD matrix whose power is to be computed
<13> p: exponent, for p a positive integer.
<14> num_iters: Maximum number of iterations.
<15> ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
<16> error_tolerance: Error indicator, useful for early termination.
<17> precision: precision XLA related flag, the available options are:
<18> a) lax.Precision.DEFAULT (better step time, but not precise)
<19> b) lax.Precision.HIGH (increased precision, slower)
<20> c) lax.Precision.HIGHEST (best possible precision, slowest)
<21>
<22> Returns:
<23> matrix^(-1/p)
<24> """
<25>
<26> assert matrix.shape[0] == matrix.shape[1]
<27>
<28> # We use float32 for the matrix inverse pth root.
<29> # Switch to f64 if you have hardware that supports it.
<30> matrix_size = matrix.shape[0]
<31> alpha = jnp.asarray(-1.0 / p, jnp.float32)
<32> identity = jnp.eye(matrix_size, dtype=jnp.float32)
<33> _, max_</s>
|
===========below chunk 0===========
# module: tools.train.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 1
matrix=matrix, num_iters=100, error_tolerance=1e-6, precision=precision
)
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-16)
def _unrolled_mat_pow_1(mat_m):
"""Computes mat_m^1."""
return mat_m
def _unrolled_mat_pow_2(mat_m):
"""Computes mat_m^2."""
return jnp.matmul(mat_m, mat_m, precision=precision)
def _unrolled_mat_pow_4(mat_m):
"""Computes mat_m^4."""
mat_pow_2 = _unrolled_mat_pow_2(mat_m)
return jnp.matmul(mat_pow_2, mat_pow_2, precision=precision)
def _unrolled_mat_pow_8(mat_m):
"""Computes mat_m^4."""
mat_pow_4 = _unrolled_mat_pow_4(mat_m)
return jnp.matmul(mat_pow_4, mat_pow_4, precision=precision)
def mat_power(mat_m, p):
"""Computes mat_m^p, for p == 1, 2, 4 or 8.
Args:
mat_m: a square matrix
p: a positive integer
Returns:
mat_m^p
"""
# We unrolled the loop for performance reasons.
exponent = jnp.round(jnp.log2(p))
return lax.switch(
jnp.asarray(exponent, jnp.int32),
[
_unrolled_mat_pow_1,</s>
===========below chunk 1===========
# module: tools.train.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 2
<s> jnp.asarray(exponent, jnp.int32),
[
_unrolled_mat_pow_1,
_unrolled_mat_pow_2,
_unrolled_mat_pow_4,
_unrolled_mat_pow_8,
],
(mat_m),
)
def _iter_condition(state):
(i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, run_step) = state
error_above_threshold = jnp.logical_and(error > error_tolerance, run_step)
return jnp.logical_and(i < num_iters, error_above_threshold)
def _iter_body(state):
(i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state
mat_m_i = (1 - alpha) * identity + alpha * mat_m
new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision)
new_error = jnp.max(jnp.abs(new_mat_m - identity))
# sometimes error increases after an iteration before decreasing and
# converging. 1.2 factor is used to bound the maximal allowed increase.
return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, new_error < error * 1.2)
</s>
===========below chunk 2===========
# module: tools.train.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 3
<s> if matrix_size == 1:
resultant_mat_h = (matrix + ridge_epsilon) ** alpha
error = 0
else:
damped_matrix = matrix + ridge_epsilon * identity
z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix))
new_mat_m_0 = damped_matrix * z
new_error = jnp.max(jnp.abs(new_mat_m_0 - identity))
new_mat_h_0 = identity * jnp.power(z, 1.0 / p)
init_state = tuple([0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True])
_, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop(
_iter_condition, _iter_body, init_state
)
error = jnp.max(jnp.abs(mat_m - identity))
is_converged = jnp.asarray(convergence, old_mat_h.dtype)
resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h
resultant_mat_h = jnp.asarray(resultant_mat_h, matrix.dtype)
return resultant_mat_h, error
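For reference, the coupled Newton iteration above can be written compactly without the early-termination and unrolled-power machinery. A slow, unoptimised sketch for small PSD matrices, following the same update equations; it is illustrative only, not a replacement for the production routine:

import jax.numpy as jnp

def inverse_pth_root_sketch(matrix, p, num_iters=50, ridge_epsilon=1e-6):
    n = matrix.shape[0]
    identity = jnp.eye(n, dtype=jnp.float32)
    alpha = -1.0 / p
    damped = matrix + ridge_epsilon * identity
    z = (1 + p) / (2 * jnp.linalg.norm(damped))
    mat_m = damped * z                        # scaled so the iteration converges
    mat_h = identity * jnp.power(z, 1.0 / p)
    for _ in range(num_iters):
        mat_m_i = (1 - alpha) * identity + alpha * mat_m
        mat_m = jnp.linalg.matrix_power(mat_m_i, p) @ mat_m
        mat_h = mat_h @ mat_m_i
    return mat_h                              # approximately matrix^(-1/p)

A = jnp.array([[4.0, 0.0], [0.0, 9.0]])
print(inverse_pth_root_sketch(A, p=2))        # close to diag(1/2, 1/3)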
===========unchanged ref 0===========
at: tools.train.distributed_shampoo
power_iteration(matrix, num_iters=100, error_tolerance=1e-6, precision=lax.Precision.HIGHEST)
===========changed ref 0===========
# module: tools.train.distributed_shampoo
class GraftingType(enum.IntEnum):
SGD = 1
ADAGRAD = 2
RMSPROP = 3
RMSPROP_NORMALIZED = 4
+ SQRT_N = 5
+ ADAGRAD_NORMALIZED = 5
|
|
dalle_mini.model.modeling/FlaxBartPreTrainedModel.__init__
|
Modified
|
borisdayma~dalle-mini
|
44b7c3e3b6ba100c285c6401d09cbb97c277e25d
|
fix: load from checkpoint
|
<19>:<add> # init weights on CPU
<del> init_fn = jax.jit(
<20>:<add> init_fn = jax.jit(self.init_weights, static_argnums=(1,), backend="cpu")
<del> self.init_weights, static_argnames="input_shape", backend="cpu"
<21>:<del> )
<23>:<add> init_fn = self.init_weigths
<del> init_fn = self.init_weights
<26>:<add> random_params = self.init_weights(self.key, input_shape)
<27>:<del> # init the model weights only abstractly, eval_shape will return a pytree
<28>:<del> # with the structure as weights but without any actual values, this will just contain
<29>:<del> # the shape information. Weights need to be loaded later.
<30>:<add> # only set shape and dtype, load parameters separately
<add> init_fn = partial(init_fn, input_shape=input_shape)
<add> random_params = jax.eval_shape(init_fn, self.key)
<del> random_params = jax.eval_shape(
<31>:<del> init_fn, rng=self.key, input_shape=input_shape
<32>:<del> )
<34>:<add> random_params = init_fn(self.key, input_shape)
<del> random_params = init_fn(rng=self.key, input_shape=input_shape)
|
# module: dalle_mini.model.modeling
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
def __init__(
self,
config: DalleBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
abstract_init: bool = False,
load_on_cpu: bool = False,
**kwargs,
):
<0> module = self.module_class(config=config, dtype=dtype, **kwargs)
<1>
<2> # adapted from HuggingFace FlaxPreTrainedModel
<3> if config is None:
<4> raise ValueError("config cannot be None")
<5>
<6> if module is None:
<7> raise ValueError("module cannot be None")
<8>
<9> # Those are private to be exposed as typed property on derived classes.
<10> self._config = config
<11> self._module = module
<12>
<13> # Those are public as their type is generic to every derived classes.
<14> self.key = PRNGKey(seed)
<15> self.dtype = dtype
<16>
<17> # init weights on CPU
<18> if load_on_cpu:
<19> init_fn = jax.jit(
<20> self.init_weights, static_argnames="input_shape", backend="cpu"
<21> )
<22> else:
<23> init_fn = self.init_weights
<24>
<25> # randomly initialized parameters
<26> if abstract_init:
<27> # init the model weights only abstractly, eval_shape will return a pytree
<28> # with the structure as weights but without any actual values, this will just contain
<29> # the shape information. Weights need to be loaded later.
<30> random_params = jax.eval_shape(
<31> init_fn, rng=self.key, input_shape=input_shape
<32> )
<33> else:
<34> random_params = init_fn(rng=self.key, input_shape=input_shape)
<35>
<36> # save required_params as set
<37> </s>
|
===========below chunk 0===========
<s> dalle_mini.model.modeling
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
def __init__(
self,
config: DalleBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
abstract_init: bool = False,
load_on_cpu: bool = False,
**kwargs,
):
# offset: 1
self.params = random_params
===========unchanged ref 0===========
at: dalle_mini.model.modeling.FlaxBartPreTrainedModel
config_class = DalleBartConfig
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.modeling_flax_utils.FlaxPreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_missing_keys = set()
at: transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel
config_class = BartConfig
base_model_prefix: str = "model"
module_class: nn.Module = None
__init__(self, config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs)
init_weights(rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
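load_on_cpu above jits init_weights with the shape marked static and the backend pinned to CPU, so freshly initialised weights land in host memory. A toy sketch of the same call pattern; the backend= argument to jax.jit was the mechanism available at the time, and later JAX releases prefer jax.default_device:

import jax

def init_weights(rng, input_shape):
    return {"kernel": jax.random.normal(rng, input_shape)}

# input_shape is a hashable tuple, so it can be a static argument
init_on_cpu = jax.jit(init_weights, static_argnums=(1,), backend="cpu")
params = init_on_cpu(jax.random.PRNGKey(0), (2, 3))  # buffers created on the CPU backend
print(params["kernel"].shape)  # (2, 3)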
|
dalle_mini.model.modeling/FlaxBartPreTrainedModel.__init__
|
Modified
|
borisdayma~dalle-mini
|
68cc185dfa4cb4d7e9125c1633a79e6326c95be0
|
fix: typo
|
<22>:<add> init_fn = self.init_weights
<del> init_fn = self.init_weigths
|
# module: dalle_mini.model.modeling
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
def __init__(
self,
config: DalleBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
abstract_init: bool = False,
load_on_cpu: bool = False,
**kwargs,
):
<0> module = self.module_class(config=config, dtype=dtype, **kwargs)
<1>
<2> # adapted from HuggingFace FlaxPreTrainedModel
<3> if config is None:
<4> raise ValueError("config cannot be None")
<5>
<6> if module is None:
<7> raise ValueError("module cannot be None")
<8>
<9> # Those are private to be exposed as typed property on derived classes.
<10> self._config = config
<11> self._module = module
<12>
<13> # Those are public as their type is generic to every derived classes.
<14> self.key = PRNGKey(seed)
<15> self.dtype = dtype
<16>
<17> # init weights on CPU
<18> if load_on_cpu:
<19> # init weights on CPU
<20> init_fn = jax.jit(self.init_weights, static_argnums=(1,), backend="cpu")
<21> else:
<22> init_fn = self.init_weigths
<23>
<24> # randomly initialized parameters
<25> random_params = self.init_weights(self.key, input_shape)
<26> if abstract_init:
<27> # only set shape and dtype, load parameters separately
<28> init_fn = partial(init_fn, input_shape=input_shape)
<29> random_params = jax.eval_shape(init_fn, self.key)
<30> else:
<31> random_params = init_fn(self.key, input_shape)
<32>
<33> # save required_params as set
<34> self._required_params = set(flatten_dict(unfreeze(random_params</s>
|
===========below chunk 0===========
<s> dalle_mini.model.modeling
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
def __init__(
self,
config: DalleBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
abstract_init: bool = False,
load_on_cpu: bool = False,
**kwargs,
):
# offset: 1
self.params = random_params
===========unchanged ref 0===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.modeling_flax_utils.FlaxPreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_missing_keys = set()
at: transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel
config_class = BartConfig
base_model_prefix: str = "model"
module_class: nn.Module = None
__init__(self, config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs)
init_weights(rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
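The truncated line above collects required parameter names via flax's unfreeze/flatten_dict pair. A small illustration of what flatten_dict yields on a nested params tree, using only public flax utilities:

from flax.core.frozen_dict import freeze, unfreeze
from flax.traverse_util import flatten_dict

params = freeze({"model": {"encoder": {"embed_tokens": {"embedding": [[0.0]]}}}})
flat = flatten_dict(unfreeze(params))
print(set(flat.keys()))  # {('model', 'encoder', 'embed_tokens', 'embedding')}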
|
tools.train.train/ModelArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
4cb21dded0d29b16c33b79d51e8ead4a1848c4de
|
feat(train): simplify tokenizer loading
|
<0>:<add> if self.tokenizer_name is None:
<add> self.tokenizer_name == self.model_name_or_path
<add> assert (
<add> self.tokenizer_name is not None
<add> ), "Tokenizer name or model name/path needs to be specified"
|
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
<0> if self.restore_state:
<1> assert self.model_name_or_path is not None and (
<2> "/model-" in self.model_name_or_path
<3> ), "Restoring state only available with W&B artifact reference"
<4> self.state_artifact = self.model_name_or_path.replace(
<5> "/model-", "/state-", 1
<6> )
<7>
|
===========unchanged ref 0===========
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state associated with a wandb checkpoint."
},
)
state_artifact: str = field(init=False)
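The validation above runs in __post_init__ so derived checks happen right after the argument parser builds the dataclass. A generic sketch of that pattern (field names here are illustrative, not the script's):

from dataclasses import dataclass
from typing import Optional

@dataclass
class Args:
    model_name_or_path: Optional[str] = None
    tokenizer_name: Optional[str] = None

    def __post_init__(self):
        if self.tokenizer_name is None:
            self.tokenizer_name = self.model_name_or_path  # assignment, not a comparison
        assert self.tokenizer_name is not None, "tokenizer or model name/path required"

print(Args(model_name_or_path="dalle-mini/model").tokenizer_name)  # dalle-mini/model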
|
tools.train.train/ModelArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
34cf91cbbd16b3fedfff01ac01f7af4945ee37fb
|
feat: reduce artifact space + offset step
|
<9>:<del> self.state_artifact = self.model_name_or_path.replace(
<10>:<del> "/model-", "/state-", 1
<11>:<del> )
|
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
<0> if self.tokenizer_name is None:
<1> self.tokenizer_name == self.model_name_or_path
<2> assert (
<3> self.tokenizer_name is not None
<4> ), "Tokenizer name or model name/path needs to be specified"
<5> if self.restore_state:
<6> assert self.model_name_or_path is not None and (
<7> "/model-" in self.model_name_or_path
<8> ), "Restoring state only available with W&B artifact reference"
<9> self.state_artifact = self.model_name_or_path.replace(
<10> "/model-", "/state-", 1
<11> )
<12>
|
===========unchanged ref 0===========
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path."
},
)
at: tools.train.train.ModelArguments.get_opt_state
self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
===========changed ref 0===========
# module: tools.train.train
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
+ "help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path."
- "help": "Restore optimizer and training state associated with a wandb checkpoint."
},
)
- state_artifact: str = field(init=False)
-
|
dalle_mini.model.utils/PretrainedFromWandbMixin.from_pretrained
|
Modified
|
borisdayma~dalle-mini
|
34cf91cbbd16b3fedfff01ac01f7af4945ee37fb
|
feat: reduce artifact space + offset step
|
<3>:<add> with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
<add> if ":" in pretrained_model_name_or_path and not os.path.isdir(
<del> if ":" in pretrained_model_name_or_path and not os.path.isdir(
<4>:<add> pretrained_model_name_or_path
<del> pretrained_model_name_or_path
<5>:<add> ):
<del> ):
<6>:<add> # wandb artifact
<del> # wandb artifact
<7>:<add> if wandb.run is not None:
<del> if wandb.run is not None:
<8>:<add> artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
<del> artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
<9>:<add> else:
<del> else:
<10>:<add> artifact = wandb.Api().artifact(pretrained_model_name_or_path)
<del> artifact = wandb.Api().artifact(pretrained_model_name_or_path)
<11>:<add> pretrained_model_name_or_path = artifact.download(tmp_dir)
<del> pretrained_model_name_or_path = artifact.download()
<13>:<add> return super(PretrainedFromWandbMixin, cls).from_pretrained(
<del> return super(PretrainedFromWandbMixin, cls).from_pretrained(
<14>:<add> pretrained_model_name_or_path, *model_args, **kwargs
<del> pretrained_model_name_or_path, *model_args, **kwargs
<15>:<add> )
<del> )
|
# module: dalle_mini.model.utils
class PretrainedFromWandbMixin:
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
<0> """
<1> Initializes from a wandb artifact, or delegates loading to the superclass.
<2> """
<3> if ":" in pretrained_model_name_or_path and not os.path.isdir(
<4> pretrained_model_name_or_path
<5> ):
<6> # wandb artifact
<7> if wandb.run is not None:
<8> artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
<9> else:
<10> artifact = wandb.Api().artifact(pretrained_model_name_or_path)
<11> pretrained_model_name_or_path = artifact.download()
<12>
<13> return super(PretrainedFromWandbMixin, cls).from_pretrained(
<14> pretrained_model_name_or_path, *model_args, **kwargs
<15> )
<16>
|
===========unchanged ref 0===========
at: os.path
isdir(s: AnyPath) -> bool
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
===========unchanged ref 1===========
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
download(root=None, recursive=False)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
===========unchanged ref 2===========
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
===========unchanged ref 3===========
use_artifact(self, artifact_or_name: Union[str, public.Artifact, Artifact], type: Optional[str]=None, aliases: Optional[List[str]]=None, use_as: Optional[str]=None) -> Union[public.Artifact, Artifact]
===========changed ref 0===========
# module: tools.train.train
@dataclass
class ModelArguments:
+ def get_metadata(self):
+ if self.restore_state:
+ if jax.process_index() == 0:
+ artifact = wandb.run.use_artifact(self.model_name_or_path)
+ else:
+ artifact = wandb.Api().artifact(self.model_name_or_path)
+ return artifact.metadata
+ else:
+ return dict()
+
===========changed ref 1===========
# module: tools.train.train
@dataclass
class ModelArguments:
+ def get_opt_state(self, tmp_dir):
+ if self.restore_state is True:
+ # wandb artifact
+ state_artifact = self.model_name_or_path.replace("/model-", "/state-", 1)
+ if jax.process_index() == 0:
+ artifact = wandb.run.use_artifact(state_artifact)
+ else:
+ artifact = wandb.Api().artifact(state_artifact)
+ artifact_dir = artifact.download(tmp_dir)
+ self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
+ return Path(self.restore_state).open("rb")
+
===========changed ref 2===========
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
if self.tokenizer_name is None:
self.tokenizer_name == self.model_name_or_path
assert (
self.tokenizer_name is not None
), "Tokenizer name or model name/path needs to be specified"
if self.restore_state:
assert self.model_name_or_path is not None and (
"/model-" in self.model_name_or_path
), "Restoring state only available with W&B artifact reference"
- self.state_artifact = self.model_name_or_path.replace(
- "/model-", "/state-", 1
- )
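Both code paths above resolve an artifact reference differently depending on whether a wandb run is active; use_artifact additionally records lineage on the run. A small helper sketch over the public wandb API (the reference string is illustrative):

import wandb

def get_artifact(reference: str):
    # inside an active run, use_artifact also logs the dependency; otherwise fall back to the public API
    if wandb.run is not None:
        return wandb.run.use_artifact(reference)
    return wandb.Api().artifact(reference)

# usage sketch:
# artifact = get_artifact("entity/project/model-run_id:latest")
# local_dir = artifact.download()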
|
tools.train.train/ModelArguments.get_opt_state
|
Modified
|
borisdayma~dalle-mini
|
50498e682e46e54fe1dfae8c205409f54ae68e89
|
feat(train): save to bucket
|
<0>:<add> with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
<add> if self.restore_state is True:
<del> if self.restore_state is True:
<1>:<add> # wandb artifact
<del> # wandb artifact
<2>:<add> state_artifact = self.model_name_or_path.replace(
<del> state_artifact = self.model_name_or_path.replace("/model-", "/state-", 1)
<3>:<add> "/model-", "/state-", 1
<add> )
<add> if jax.process_index() == 0:
<del> if jax.process_index() == 0:
<4>:<add> artifact = wandb.run.use_artifact(state_artifact)
<del> artifact = wandb.run.use_artifact(state_artifact)
<5>:<add> else:
<del> else:
<6>:<add> artifact = wandb.Api().artifact(state_artifact)
<del> artifact = wandb.Api().artifact(state_artifact)
<7>:<add> artifact_dir = artifact.download(tmp_dir)
<del> artifact_dir = artifact.download(tmp_dir)
<8>:<add> self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
<del> self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
<9>:<add> return Path(self.restore_state).open("rb")
<del> return Path(self.restore_state).open("rb")
|
# module: tools.train.train
@dataclass
class ModelArguments:
+ def get_opt_state(self):
- def get_opt_state(self, tmp_dir):
<0> if self.restore_state is True:
<1> # wandb artifact
<2> state_artifact = self.model_name_or_path.replace("/model-", "/state-", 1)
<3> if jax.process_index() == 0:
<4> artifact = wandb.run.use_artifact(state_artifact)
<5> else:
<6> artifact = wandb.Api().artifact(state_artifact)
<7> artifact_dir = artifact.download(tmp_dir)
<8> self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
<9> return Path(self.restore_state).open("rb")
<10>
|
===========unchanged ref 0===========
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path."
},
)
at: tools.train.train.ModelArguments.get_opt_state
self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
===========unchanged ref 1===========
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
===========unchanged ref 2===========
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
===========unchanged ref 3===========
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
use_artifact(self, artifact_or_name: Union[str, public.Artifact, Artifact], type: Optional[str]=None, aliases: Optional[List[str]]=None, use_as: Optional[str]=None) -> Union[public.Artifact, Artifact]
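get_opt_state above hands back an open binary file holding the serialized optimizer state; a common way to produce and consume such a file is flax.serialization's msgpack helpers. A round-trip sketch under that assumption (the training script's exact deserialization call is not shown here):

from pathlib import Path
from flax.serialization import to_bytes, from_bytes

opt_state = {"count": 3, "mu": [1.0, 2.0]}                 # toy pytree
Path("opt_state.msgpack").write_bytes(to_bytes(opt_state))

template = {"count": 0, "mu": [0.0, 0.0]}                  # same structure, placeholder values
restored = from_bytes(template, Path("opt_state.msgpack").read_bytes())
print(restored)  # {'count': 3, 'mu': [1.0, 2.0]}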
|
tools.train.train/ModelArguments.get_opt_state
|
Modified
|
borisdayma~dalle-mini
|
1c4e8392f382bd6fe8d26229d4f37c7d48cea337
|
feat: load from bucket
|
<11>:<add> if artifact.metadata.get("bucket_path"):
<add> self.restore_state = artifact.metadata["bucket_path"]
<add> else:
<add> self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
<del> self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
<12>:<del> return Path(self.restore_state).open("rb")
|
# module: tools.train.train
@dataclass
class ModelArguments:
def get_opt_state(self):
<0> with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
<1> if self.restore_state is True:
<2> # wandb artifact
<3> state_artifact = self.model_name_or_path.replace(
<4> "/model-", "/state-", 1
<5> )
<6> if jax.process_index() == 0:
<7> artifact = wandb.run.use_artifact(state_artifact)
<8> else:
<9> artifact = wandb.Api().artifact(state_artifact)
<10> artifact_dir = artifact.download(tmp_dir)
<11> self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
<12> return Path(self.restore_state).open("rb")
<13>
|
===========unchanged ref 0===========
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path."
},
)
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
===========unchanged ref 1===========
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
===========unchanged ref 2===========
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
download(root=None, recursive=False)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
===========unchanged ref 3===========
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
use_artifact(self, artifact_or_name: Union[str, public.Artifact, Artifact], type: Optional[str]=None, aliases: Optional[List[str]]=None, use_as: Optional[str]=None) -> Union[public.Artifact, Artifact]
|
dalle_mini.model.utils/PretrainedFromWandbMixin.from_pretrained
|
Modified
|
borisdayma~dalle-mini
|
1c4e8392f382bd6fe8d26229d4f37c7d48cea337
|
feat: load from bucket
|
<1>:<add> Initializes from a wandb artifact, google bucket path or delegates loading to the superclass.
<del> Initializes from a wandb artifact, or delegates loading to the superclass.
<4>:<add> if (
<del> if ":" in pretrained_model_name_or_path and not os.path.isdir(
<5>:<add> ":" in pretrained_model_name_or_path
<del> pretrained_model_name_or_path
<6>:<add> and not os.path.isdir(pretrained_model_name_or_path)
<add> and not pretrained_model_name_or_path.startswith("gs")
<13>:<add> if artifact.metadata.get("bucket_path"):
<add> pretrained_model_name_or_path = artifact.metadata["bucket_path"]
<add>
<add> if pretrained_model_name_or_path.startswith("gs://"):
<add> copy_blobs(pretrained_model_name_or_path, tmp_dir)
<add> pretrained_model_name_or_path = tmp_dir
|
# module: dalle_mini.model.utils
class PretrainedFromWandbMixin:
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
<0> """
<1> Initializes from a wandb artifact, or delegates loading to the superclass.
<2> """
<3> with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
<4> if ":" in pretrained_model_name_or_path and not os.path.isdir(
<5> pretrained_model_name_or_path
<6> ):
<7> # wandb artifact
<8> if wandb.run is not None:
<9> artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
<10> else:
<11> artifact = wandb.Api().artifact(pretrained_model_name_or_path)
<12> pretrained_model_name_or_path = artifact.download(tmp_dir)
<13>
<14> return super(PretrainedFromWandbMixin, cls).from_pretrained(
<15> pretrained_model_name_or_path, *model_args, **kwargs
<16> )
<17>
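Note: the gs:// branch added in this diff relies on a copy_blobs helper that is not shown here. A minimal sketch of what such a helper could look like, assuming the google-cloud-storage client (the name and signature come from the diff, the body is an assumption):

from pathlib import Path
from google.cloud import storage

def copy_blobs(source_path, dest_dir):
    # source_path is expected to look like "gs://bucket-name/some/prefix"
    assert source_path.startswith("gs://")
    bucket_name, _, prefix = source_path[5:].partition("/")
    client = storage.Client()
    for blob in client.list_blobs(bucket_name, prefix=prefix):
        if blob.name.endswith("/"):  # skip directory placeholders
            continue
        local_path = Path(dest_dir) / blob.name[len(prefix):].lstrip("/")
        local_path.parent.mkdir(parents=True, exist_ok=True)
        blob.download_to_filename(str(local_path))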
|
===========unchanged ref 0===========
at: os.path
isdir(s: AnyPath) -> bool
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
===========unchanged ref 1===========
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
download(root=None, recursive=False)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
===========unchanged ref 2===========
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
===========unchanged ref 3===========
use_artifact(self, artifact_or_name: Union[str, public.Artifact, Artifact], type: Optional[str]=None, aliases: Optional[List[str]]=None, use_as: Optional[str]=None) -> Union[public.Artifact, Artifact]
===========changed ref 0===========
# module: tools.train.train
@dataclass
class ModelArguments:
def get_opt_state(self):
with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
if self.restore_state is True:
# wandb artifact
state_artifact = self.model_name_or_path.replace(
"/model-", "/state-", 1
)
if jax.process_index() == 0:
artifact = wandb.run.use_artifact(state_artifact)
else:
artifact = wandb.Api().artifact(state_artifact)
artifact_dir = artifact.download(tmp_dir)
+ if artifact.metadata.get("bucket_path"):
+ self.restore_state = artifact.metadata["bucket_path"]
+ else:
+ self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
- self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
- return Path(self.restore_state).open("rb")
|
dalle_mini.model.modeling/FlaxBartEncoderLayerCollection.setup
|
Modified
|
borisdayma~dalle-mini
|
5173ec7ed53b9590bb7e893c22cb49f86bdbb6b2
|
feat: handle gradient checkpointing
|
<1>:<add> nn.remat(FlaxBartEncoderLayer, concrete=True)
<del> nn.remat(FlaxBartEncoderLayer)
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
def setup(self):
<0> layer_module = (
<1> nn.remat(FlaxBartEncoderLayer)
<2> if self.config.gradient_checkpointing
<3> else FlaxBartEncoderLayer
<4> )
<5> self.layers = [
<6> layer_module(self.config, name=str(i), dtype=self.dtype)
<7> for i in range(self.config.encoder_layers)
<8> ]
<9> self.layerdrop = self.config.encoder_layerdrop
<10>
|
===========unchanged ref 0===========
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.encoder_layers = encoder_layers
self.encoder_layerdrop = encoder_layerdrop
at: transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
===========changed ref 0===========
# module: tools.train.train
def main():
# See all possible arguments by passing the --help flag to this script.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args.do_train,
do_eval=training_args.do_eval,
)
logger.info(f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax</s>
===========changed ref 1===========
# module: tools.train.train
def main():
# offset: 1
<s>f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax.device_count()}")
if training_args.assert_TPU_available:
assert (
jax.local_device_count() == 8
), "TPUs in use, please check running processes"
# Set up wandb run
if jax.process_index() == 0:
wandb.init(
entity=training_args.wandb_entity,
project=training_args.wandb_project,
job_type=training_args.wandb_job_type,
config=parser.parse_args(),
)
# Set up our new model config
if model_args.config_name:
config = DalleBartConfig.from_pretrained(model_args.config_name)
+ # initializing params with gradient checkpointing creates issues
+ config.gradient_checkpointing = False
else:
config = None
# Load or create new model
if model_args.model_name_or_path:
model = DalleBart.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
abstract_init=True,
load_on_cpu=True,
)
else:
model = DalleBart(
config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
load_on_cpu=True,
)
# update model config per training args
+ # Done after initialization of weights to avoid issues with remat
+ # This is still considered correctly during training as function is pjitted
model.config.gradient</s>
===========changed ref 2===========
# module: tools.train.train
def main():
# offset: 2
<s>ing = training_args.gradient_checkpointing
+
+ # eval model cannot use remat
+ eval_config = copy.deepcopy(model.config)
+ eval_config.gradient_checkpointing = False
+
+ if training_args.gradient_checkpointing:
+ eval_model = DalleBart(
+ eval_config,
+ seed=training_args.seed_model,
+ dtype=getattr(jnp, model_args.dtype),
+ abstract_init=True,
+ load_on_cpu=True,
+ )
+ del eval_model._params
+ eval_fn = eval_model.__call__
+ else:
+ eval_fn = model.__call__
# get model metadata
model_metadata = model_args.get_metadata()
# get PartitionSpec for model params (required to be a dict)
param_spec = set_partitions(model.params)
# convert params to frozen dict
model._params = freeze(model.params)
# Load tokenizer
tokenizer = DalleBartTokenizer.from_pretrained(
model_args.tokenizer_name, use_fast=True
)
# Preprocessing the datasets.
# We need to normalize and tokenize inputs and targets.
dataset.preprocess(tokenizer=tokenizer, config=model.config)
# Initialize our training
dropout_rng = jax.random.PRNGKey(training_args.seed_model)
# Store some constant
num_epochs = training_args.num_train_epochs
# batch size
batch_size_per_node_per_grad_step = (
training_args.per_device_train_batch_size
* jax.local_device_count()
// training_args.mp_devices
)
batch_size_per_node = (
batch_size_</s>
|
dalle_mini.model.modeling/FlaxBartDecoderLayerCollection.setup
|
Modified
|
borisdayma~dalle-mini
|
5173ec7ed53b9590bb7e893c22cb49f86bdbb6b2
|
feat: handle gradient checkpointing
|
<1>:<add> nn.remat(FlaxBartDecoderLayer, concrete=True)
<del> nn.remat(FlaxBartDecoderLayer)
|
# module: dalle_mini.model.modeling
class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
def setup(self):
<0> layer_module = (
<1> nn.remat(FlaxBartDecoderLayer)
<2> if self.config.gradient_checkpointing
<3> else FlaxBartDecoderLayer
<4> )
<5> self.layers = [
<6> layer_module(self.config, name=str(i), dtype=self.dtype)
<7> for i in range(self.config.decoder_layers)
<8> ]
<9> self.layerdrop = self.config.decoder_layerdrop
<10>
|
===========unchanged ref 0===========
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.decoder_layers = decoder_layers
self.decoder_layerdrop = decoder_layerdrop
at: transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
def setup(self):
layer_module = (
+ nn.remat(FlaxBartEncoderLayer, concrete=True)
- nn.remat(FlaxBartEncoderLayer)
if self.config.gradient_checkpointing
else FlaxBartEncoderLayer
)
self.layers = [
layer_module(self.config, name=str(i), dtype=self.dtype)
for i in range(self.config.encoder_layers)
]
self.layerdrop = self.config.encoder_layerdrop
===========changed ref 1===========
# module: tools.train.train
def main():
# See all possible arguments by passing the --help flag to this script.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args.do_train,
do_eval=training_args.do_eval,
)
logger.info(f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax</s>
===========changed ref 2===========
# module: tools.train.train
def main():
# offset: 1
<s>f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax.device_count()}")
if training_args.assert_TPU_available:
assert (
jax.local_device_count() == 8
), "TPUs in use, please check running processes"
# Set up wandb run
if jax.process_index() == 0:
wandb.init(
entity=training_args.wandb_entity,
project=training_args.wandb_project,
job_type=training_args.wandb_job_type,
config=parser.parse_args(),
)
# Set up our new model config
if model_args.config_name:
config = DalleBartConfig.from_pretrained(model_args.config_name)
+ # initializing params with gradient checkpointing creates issues
+ config.gradient_checkpointing = False
else:
config = None
# Load or create new model
if model_args.model_name_or_path:
model = DalleBart.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
abstract_init=True,
load_on_cpu=True,
)
else:
model = DalleBart(
config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
load_on_cpu=True,
)
# update model config per training args
+ # Done after initialization of weights to avoid issues with remat
+ # This is still considered correctly during training as function is pjitted
model.config.gradient</s>
===========changed ref 3===========
# module: tools.train.train
def main():
# offset: 2
<s>ing = training_args.gradient_checkpointing
+
+ # eval model cannot use remat
+ eval_config = copy.deepcopy(model.config)
+ eval_config.gradient_checkpointing = False
+
+ if training_args.gradient_checkpointing:
+ eval_model = DalleBart(
+ eval_config,
+ seed=training_args.seed_model,
+ dtype=getattr(jnp, model_args.dtype),
+ abstract_init=True,
+ load_on_cpu=True,
+ )
+ del eval_model._params
+ eval_fn = eval_model.__call__
+ else:
+ eval_fn = model.__call__
# get model metadata
model_metadata = model_args.get_metadata()
# get PartitionSpec for model params (required to be a dict)
param_spec = set_partitions(model.params)
# convert params to frozen dict
model._params = freeze(model.params)
# Load tokenizer
tokenizer = DalleBartTokenizer.from_pretrained(
model_args.tokenizer_name, use_fast=True
)
# Preprocessing the datasets.
# We need to normalize and tokenize inputs and targets.
dataset.preprocess(tokenizer=tokenizer, config=model.config)
# Initialize our training
dropout_rng = jax.random.PRNGKey(training_args.seed_model)
# Store some constant
num_epochs = training_args.num_train_epochs
# batch size
batch_size_per_node_per_grad_step = (
training_args.per_device_train_batch_size
* jax.local_device_count()
// training_args.mp_devices
)
batch_size_per_node = (
batch_size_</s>
|
tools.train.train/ModelArguments.get_opt_state
|
Modified
|
borisdayma~dalle-mini
|
d368fb6fb47e89a1dcb9d35dc7e038a5d4b8c89c
|
feat: add bucket reference to artifact
|
<10>:<del> artifact_dir = artifact.download(tmp_dir)
<12>:<add> # we will read directly file contents
<14>:<add> artifact_dir = artifact.download(tmp_dir)
<add> self.restore_state = str(Path(artifact_dir) / "opt_state.msgpack")
<del> self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
|
# module: tools.train.train
@dataclass
class ModelArguments:
def get_opt_state(self):
<0> with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
<1> if self.restore_state is True:
<2> # wandb artifact
<3> state_artifact = self.model_name_or_path.replace(
<4> "/model-", "/state-", 1
<5> )
<6> if jax.process_index() == 0:
<7> artifact = wandb.run.use_artifact(state_artifact)
<8> else:
<9> artifact = wandb.Api().artifact(state_artifact)
<10> artifact_dir = artifact.download(tmp_dir)
<11> if artifact.metadata.get("bucket_path"):
<12> self.restore_state = artifact.metadata["bucket_path"]
<13> else:
<14> self.restore_state = Path(artifact_dir) / "opt_state.msgpack"
<15>
<16> if self.restore_state.startswith("gs://"):
<17> bucket_path = Path(self.restore_state[5:]) / "opt_state.msgpack"
<18> bucket, blob_name = str(bucket_path).split("/", 1)
<19> client = storage.Client()
<20> bucket = client.bucket(bucket)
<21> blob = bucket.blob(blob_name)
<22> return blob.download_as_bytes()
<23>
<24> with Path(self.restore_state).open("rb") as f:
<25> return f.read()
<26>
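Note: the bytes returned by get_opt_state are a serialized optimizer state; downstream they would typically be restored with flax.serialization.from_bytes against a freshly built state of the same pytree structure. A hedged sketch of that consumption (the optimizer and the placeholder params are illustrative, not taken from this diff):

import jax.numpy as jnp
import optax
from flax.serialization import from_bytes

params = {"w": jnp.zeros((8, 8))}          # placeholder pytree standing in for model params
optimizer = optax.adam(1e-4)               # stand-in; the training script supports other optimizers
opt_state_template = optimizer.init(params)
opt_state = from_bytes(opt_state_template, model_args.get_opt_state())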
|
===========unchanged ref 0===========
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
open(mode: OpenBinaryMode, buffering: Literal[0], encoding: None=..., errors: None=..., newline: None=...) -> FileIO
open(mode: OpenBinaryModeReading, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedReader
open(mode: OpenBinaryModeUpdating, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedRandom
open(mode: OpenBinaryMode, buffering: int, encoding: None=..., errors: None=..., newline: None=...) -> BinaryIO
open(mode: str, buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> IO[Any]
open(mode: OpenBinaryModeWriting, buffering: Literal[-1, 1]=..., encoding: None=..., errors: None=..., newline: None=...) -> BufferedWriter
open(mode: OpenTextMode=..., buffering: int=..., encoding: Optional[str]=..., errors: Optional[str]=..., newline: Optional[str]=...) -> TextIOWrapper
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
===========unchanged ref 1===========
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path."
},
)
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
===========unchanged ref 2===========
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
===========unchanged ref 3===========
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
|
dalle_mini.model.utils/PretrainedFromWandbMixin.from_pretrained
|
Modified
|
borisdayma~dalle-mini
|
803ccbf4021a8dcfd7e3c5192b9573b4f6a50c64
|
feat: support pod (#139)
|
<15>:<del> if artifact.metadata.get("bucket_path"):
<16>:<del> pretrained_model_name_or_path = artifact.metadata["bucket_path"]
<17>:<del>
<18>:<del> if pretrained_model_name_or_path.startswith("gs://"):
<19>:<del> copy_blobs(pretrained_model_name_or_path, tmp_dir)
<20>:<del> pretrained_model_name_or_path = tmp_dir
|
# module: dalle_mini.model.utils
class PretrainedFromWandbMixin:
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
<0> """
<1> Initializes from a wandb artifact, google bucket path or delegates loading to the superclass.
<2> """
<3> with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
<4> if (
<5> ":" in pretrained_model_name_or_path
<6> and not os.path.isdir(pretrained_model_name_or_path)
<7> and not pretrained_model_name_or_path.startswith("gs")
<8> ):
<9> # wandb artifact
<10> if wandb.run is not None:
<11> artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
<12> else:
<13> artifact = wandb.Api().artifact(pretrained_model_name_or_path)
<14> pretrained_model_name_or_path = artifact.download(tmp_dir)
<15> if artifact.metadata.get("bucket_path"):
<16> pretrained_model_name_or_path = artifact.metadata["bucket_path"]
<17>
<18> if pretrained_model_name_or_path.startswith("gs://"):
<19> copy_blobs(pretrained_model_name_or_path, tmp_dir)
<20> pretrained_model_name_or_path = tmp_dir
<21>
<22> return super(PretrainedFromWandbMixin, cls).from_pretrained(
<23> pretrained_model_name_or_path, *model_args, **kwargs
<24> )
<25>
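Note: after this change only the wandb-artifact branch remains in the mixin; anything else (a local directory, a hub id, or a gs:// path handled elsewhere) is passed straight to the superclass. A usage sketch with a made-up artifact reference and an assumed import path:

from dalle_mini.model import DalleBart  # assumes DalleBart mixes in PretrainedFromWandbMixin

# a wandb artifact reference (contains ":"), resolved and downloaded to a temp dir
model = DalleBart.from_pretrained("my-entity/dalle-mini/model-abc123:latest")
# a plain local checkpoint directory is delegated to the superclass unchanged
model = DalleBart.from_pretrained("./local-checkpoint")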
|
===========unchanged ref 0===========
at: dalle_mini.model.utils
PretrainedFromWandbMixin()
at: os.path
isdir(s: AnyPath) -> bool
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
===========unchanged ref 1===========
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
download(root=None, recursive=False)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
===========unchanged ref 2===========
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
===========unchanged ref 3===========
use_artifact(self, artifact_or_name: Union[str, public.Artifact, Artifact], type: Optional[str]=None, aliases: Optional[List[str]]=None, use_as: Optional[str]=None) -> Union[public.Artifact, Artifact]
===========changed ref 0===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+
+
===========changed ref 1===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+ # pylint:disable=no-value-for-parameter
+ @struct.dataclass
+ class QuantizedValue:
+ @classmethod
+ def from_float_value(cls, fvalue, quantized_dtype, extract_diagonal=False):
+ if isinstance(fvalue, list) and not fvalue:
+ return QuantizedValue([], [], [], quantized_dtype, extract_diagonal, [])
+ quantized, diagonal_fvalue, bucket_size = QuantizedValue.quantize(
+ fvalue, quantized_dtype, extract_diagonal
+ )
+ return QuantizedValue(
+ quantized,
+ diagonal_fvalue,
+ bucket_size,
+ quantized_dtype,
+ extract_diagonal,
+ list(quantized.shape),
+ )
+
===========changed ref 2===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+ # pylint:disable=no-value-for-parameter
+ @struct.dataclass
+ class QuantizedValue:
+ """State associated with quantized value."""
+
+ quantized: chex.Array
+ diagonal: chex.Array # Diagonal (if extract_diagonal is set)
+ bucket_size: chex.Array
+ quantized_dtype: jnp.dtype = struct.field(
+ pytree_node=False
+ ) # Dtype for the quantized value.
+ extract_diagonal: bool = struct.field(pytree_node=False) # In case its centered.
+ shape: Any = struct.field(pytree_node=False) # Shape of the tensor.
+
|
dalle_mini.model.modeling/FlaxBartPreTrainedModel.__init__
|
Modified
|
borisdayma~dalle-mini
|
803ccbf4021a8dcfd7e3c5192b9573b4f6a50c64
|
feat: support pod (#139)
|
<17>:<add> if init_weights:
<del> # init weights on CPU
<18>:<del> if load_on_cpu:
<19>:<del> # init weights on CPU
<20>:<del> init_fn = jax.jit(self.init_weights, static_argnums=(1,), backend="cpu")
<21>:<del> else:
<22>:<add> # get shape of params only
<add> random_params = self.init_weights(
<del> init_fn = self.init_weights
<23>:<add> self.key,
<add> input_shape,
<add> abstract_init=abstract_init,
<add> load_on_cpu=load_on_cpu,
<add> )
<24>:<del> # randomly initialized parameters
<25>:<del> random_params = self.init_weights(self.key, input_shape)
<26>:<del> if abstract_init:
<27>:<del> # only set shape and dtype, load parameters separately
<28>:<del> init_fn = partial(init_fn, input_shape=input_shape)
<29>:<del> random_params = jax.eval_shape(init_fn, self.key)
<30>:<del> else:
<31>:<del> random_params = init_fn(self.key, input_shape)
<32>:<add> # save required_params as set
<add> self._required_params = set(flatten_dict(unfreeze(random_params)).keys())
<add> self.params = random_params
<33>:<del> # save required_params as set
<34>:<del> self._required_params = set(flatten_dict(unfreeze(random_params
|
<s>model.modeling
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
def __init__(
self,
config: DalleBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
abstract_init: bool = False,
load_on_cpu: bool = False,
+ init_weights: bool = True,
**kwargs,
):
<0> module = self.module_class(config=config, dtype=dtype, **kwargs)
<1>
<2> # adapted from HuggingFace FlaxPreTrainedModel
<3> if config is None:
<4> raise ValueError("config cannot be None")
<5>
<6> if module is None:
<7> raise ValueError("module cannot be None")
<8>
<9> # Those are private to be exposed as typed property on derived classes.
<10> self._config = config
<11> self._module = module
<12>
<13> # Those are public as their type is generic to every derived classes.
<14> self.key = PRNGKey(seed)
<15> self.dtype = dtype
<16>
<17> # init weights on CPU
<18> if load_on_cpu:
<19> # init weights on CPU
<20> init_fn = jax.jit(self.init_weights, static_argnums=(1,), backend="cpu")
<21> else:
<22> init_fn = self.init_weights
<23>
<24> # randomly initialized parameters
<25> random_params = self.init_weights(self.key, input_shape)
<26> if abstract_init:
<27> # only set shape and dtype, load parameters separately
<28> init_fn = partial(init_fn, input_shape=input_shape)
<29> random_params = jax.eval_shape(init_fn, self.key)
<30> else:
<31> random_params = init_fn(self.key, input_shape)
<32>
<33> # save required_params as set
<34> self._required_params = set(flatten_dict(unfreeze(random_params</s>
|
===========below chunk 0===========
<s>class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
def __init__(
self,
config: DalleBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
abstract_init: bool = False,
load_on_cpu: bool = False,
+ init_weights: bool = True,
**kwargs,
):
# offset: 1
self.params = random_params
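Note: the abstract_init path relies on jax.eval_shape, which traces the init function and returns only shape/dtype structures, so no parameter memory is allocated or computed; real weights are loaded separately afterwards. A small self-contained illustration:

from functools import partial

import jax
import jax.numpy as jnp
import flax.linen as nn

model = nn.Dense(1024)

def init_fn(rng, input_shape):
    return model.init(rng, jnp.ones(input_shape, dtype=jnp.float32))

# abstract_params contains jax.ShapeDtypeStruct leaves, no device buffers
abstract_params = jax.eval_shape(partial(init_fn, input_shape=(1, 512)), jax.random.PRNGKey(0))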
===========unchanged ref 0===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.modeling_flax_utils.FlaxPreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_missing_keys = set()
at: transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel
config_class = BartConfig
base_model_prefix: str = "model"
module_class: nn.Module = None
__init__(self, config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs)
init_weights(rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+
+
===========changed ref 1===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+ # pylint:disable=no-value-for-parameter
+ @struct.dataclass
+ class QuantizedValue:
+ @classmethod
+ def from_float_value(cls, fvalue, quantized_dtype, extract_diagonal=False):
+ if isinstance(fvalue, list) and not fvalue:
+ return QuantizedValue([], [], [], quantized_dtype, extract_diagonal, [])
+ quantized, diagonal_fvalue, bucket_size = QuantizedValue.quantize(
+ fvalue, quantized_dtype, extract_diagonal
+ )
+ return QuantizedValue(
+ quantized,
+ diagonal_fvalue,
+ bucket_size,
+ quantized_dtype,
+ extract_diagonal,
+ list(quantized.shape),
+ )
+
===========changed ref 2===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+ # pylint:disable=no-value-for-parameter
+ @struct.dataclass
+ class QuantizedValue:
+ """State associated with quantized value."""
+
+ quantized: chex.Array
+ diagonal: chex.Array # Diagonal (if extract_diagonal is set)
+ bucket_size: chex.Array
+ quantized_dtype: jnp.dtype = struct.field(
+ pytree_node=False
+ ) # Dtype for the quantized value.
+ extract_diagonal: bool = struct.field(pytree_node=False) # In case its centered.
+ shape: Any = struct.field(pytree_node=False) # Shape of the tensor.
+
===========changed ref 3===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+ # pylint:disable=no-value-for-parameter
+ @struct.dataclass
+ class QuantizedValue:
+ def to_float(self):
+ """Returns the float value."""
+ if isinstance(self.quantized, list) and not self.quantized:
+ return self.quantized
+
+ if self.quantized_dtype == jnp.float32:
+ return self.quantized
+
+ if self.quantized_dtype == jnp.bfloat16:
+ return self.quantized.astype(jnp.float32)
+
+ float_dtype = self.bucket_size.dtype
+ bucket_size = self.bucket_size[jnp.newaxis, Ellipsis]
+ val = self.quantized.astype(float_dtype) * bucket_size
+ if self.extract_diagonal:
+ val += jnp.diag(self.diagonal)
+ return val
+
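Note: in equation form the dequantization above is $\hat{X} = \operatorname{float}(Q) \odot s$, plus $\operatorname{diag}(d)$ when extract_diagonal is set, where $Q$ is the stored low-precision tensor, $s$ the bucket_size broadcast over the leading axis, and $d$ the extracted diagonal; bfloat16 values are simply cast back to float32 without any scale.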
===========changed ref 4===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+ # coding=utf-8
+ # Copyright 2022 The Google Research Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Helper routines for quantization."""
+
===========changed ref 5===========
# module: dalle_mini.model.utils
class PretrainedFromWandbMixin:
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
"""
Initializes from a wandb artifact, google bucket path or delegates loading to the superclass.
"""
with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
if (
":" in pretrained_model_name_or_path
and not os.path.isdir(pretrained_model_name_or_path)
and not pretrained_model_name_or_path.startswith("gs")
):
# wandb artifact
if wandb.run is not None:
artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
else:
artifact = wandb.Api().artifact(pretrained_model_name_or_path)
pretrained_model_name_or_path = artifact.download(tmp_dir)
- if artifact.metadata.get("bucket_path"):
- pretrained_model_name_or_path = artifact.metadata["bucket_path"]
-
- if pretrained_model_name_or_path.startswith("gs://"):
- copy_blobs(pretrained_model_name_or_path, tmp_dir)
- pretrained_model_name_or_path = tmp_dir
return super(PretrainedFromWandbMixin, cls).from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
|
tools.train.train/TrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
803ccbf4021a8dcfd7e3c5192b9573b4f6a50c64
|
feat: support pod (#139)
|
<0>:<add> if self.assert_TPU_available:
<add> assert (
<add> jax.local_device_count() == 8
<add> ), "TPUs in use, please check running processes"
<5>:<add> assert self.graft_type in [
<add> "rmsprop_normalized",
<add> "rmsprop",
<add> "adagrad",
<add> "adagrad_normalized",
<add> "sgd",
<add> "sqrt_n",
<add> ], f"Selected graft type not supported: {self.graft_type}"
<add> assert self.lr_decay in [
<add> None,
<add> "linear",
<add> "exponential",
<add> ], f"Selected learning rate decay not supported: {self.lr_decay}"
<18>:<add> self.mp_devices > 0
<add> ), f"Number of devices for model parallelism must be > 0"
<add> assert (
|
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
<0> assert self.optim in [
<1> "distributed_shampoo",
<2> "adam",
<3> "adafactor",
<4> ], f"Selected optimizer not supported: {self.optim}"
<5> if self.per_device_eval_batch_size is None:
<6> self.per_device_eval_batch_size = self.per_device_train_batch_size
<7> if (
<8> os.path.exists(self.output_dir)
<9> and os.listdir(self.output_dir)
<10> and self.do_train
<11> and not self.overwrite_output_dir
<12> ):
<13> raise ValueError(
<14> f"Output directory ({self.output_dir}) already exists and is not empty."
<15> "Use --overwrite_output_dir to overcome."
<16> )
<17> assert (
<18> jax.device_count() % self.mp_devices == 0
<19> ), f"Number of available devices ({jax.device_count()}) must be divisible by the number of devices used for model parallelism ({self.mp_devices})."
<20> self.dp_devices = jax.device_count() // self.mp_devices
<21>
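Note: dp_devices is simply the number of available devices divided by the model-parallel dimension; for example a v3-8 host (8 devices) with mp_devices=2 trains with dp_devices=4. The resulting (dp, mp) grid is what the device mesh is later built from:

import numpy as np
import jax

mp_devices = 2
dp_devices = jax.device_count() // mp_devices            # 8 // 2 == 4 on a v3-8
devices = np.asarray(jax.devices()).reshape(dp_devices, mp_devices)
# `devices` is the 2D array a jax mesh with axes ("dp", "mp") would be created from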
|
===========unchanged ref 0===========
at: os
listdir(path: bytes) -> List[bytes]
listdir(path: int) -> List[str]
listdir(path: Optional[str]=...) -> List[str]
listdir(path: _PathLike[str]) -> List[str]
at: os.path
exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool
at: tools.train.train.TrainingArguments
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
===========unchanged ref 1===========
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
start_preconditioning_step: int = field(
default=100,
metadata={"help": "Number of steps before starting to update preconditioner."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
===========unchanged ref 2===========
warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
lr_staircase: bool = field(
default=False,
metadata={
"help": "Whether to use staircase or continuous learning rate when using exponential decay."
},
)
logging_steps: int = field(
default=40, metadata={"help": "Log every X updates steps."}
)
eval_steps: int = field(
default=400, metadata={"help": "Run an evaluation every X steps."}
)
save_steps: int = field(
default=4000, metadata={"help": "Save checkpoint every X updates steps."}
)
log_model: bool = field(
default=False,
metadata={"help": "Log model to wandb at `save_steps` frequency."},
)
seed_model: int = field(
default=42,
metadata={
"help": "Random seed for the model that will be set at the beginning of training."
},
)
wandb_entity: Optional[str] = field(
default=None,
metadata={"help": "The wandb entity to use (for teams)."},
)
===========unchanged ref 3===========
wandb_project: str = field(
default="dalle-mini",
metadata={"help": "The name of the wandb project."},
)
wandb_job_type: str = field(
default="Seq2Seq",
metadata={"help": "The name of the wandb job type."},
)
assert_TPU_available: bool = field(
default=False,
metadata={"help": "Verify that TPU is not in use."},
)
mp_devices: Optional[int] = field(
default=1,
metadata={
"help": "Number of devices required for model parallelism. The other dimension of available devices is used for data parallelism."
},
)
dp_devices: int = field(init=False)
===========changed ref 0===========
# module: tools.train.train
+ cc.initialize_cache("./jax_cache", max_cache_size_bytes=10 * 2**30)
- cc.initialize_cache("./jax_cache", max_cache_size_bytes=5 * 2**30)
logger = logging.getLogger(__name__)
===========changed ref 1===========
# module: tools.train.train
#!/usr/bin/env python
# coding=utf-8
+ # Copyright 2021-2022 The HuggingFace & DALL·E Mini team. All rights reserved.
- # Copyright 2021-2022 The HuggingFace & DALL·E Mini Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Training DALL·E Mini.
Script adapted from run_summarization_flax.py
"""
|
dalle_mini.data/Dataset.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
803ccbf4021a8dcfd7e3c5192b9573b4f6a50c64
|
feat: support pod (#139)
|
<1>:<add> # feed blank captions only in streaming mode for now
<add> if self.blank_caption_prob:
<add> assert (
<add> self.streaming is True
<add> ), "blank_caption_prob can only be used in streaming mode"
|
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
<0> self.multi_hosts = jax.process_count() > 1
<1> # define data_files
<2> if self.train_file is not None or self.validation_file is not None:
<3> # accept braceexpand notation
<4> for k in ["train_file", "validation_file"]:
<5> f = getattr(self, k)
<6> if isinstance(f, str):
<7> setattr(self, k, list(braceexpand(f)))
<8> # for list of files, split training data shards by host
<9> if (
<10> isinstance(self.train_file, list)
<11> and self.multi_hosts
<12> and self.shard_by_host
<13> ):
<14> self.train_file = self.train_file[
<15> jax.process_index() :: jax.process_count()
<16> ]
<17> data_files = {
<18> "train": self.train_file,
<19> "validation": self.validation_file,
<20> }
<21> else:
<22> data_files = None
<23>
<24> # load dataset
<25> dataset = load_dataset(
<26> self.dataset_repo_or_path,
<27> data_files=data_files,
<28> streaming=self.streaming,
<29> use_auth_token=self.use_auth_token,
<30> )
<31> if self.do_train:
<32> if "train" not in dataset:
<33> raise ValueError("Training requires a training dataset")
<34> self.train_dataset = dataset["train"]
<35> if self.max_train_samples is not None:
<36> self.train_dataset = (
<37> self.train_dataset.take(self.max_train_samples)
<38> if self.streaming
<39> else self.train_dataset.select(range(self.max_train_samples))
<40> )
<41> if self.do_eval:
<42> if "validation" not in dataset:
<43> raise ValueError("Evaluating requires a validation dataset")</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
if self.max_eval_samples is not None:
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
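Note: two details of the loader above are easy to miss: braceexpand turns a single pattern into the full shard list, and the [process_index()::process_count()] slice gives each host a disjoint subset of training shards. A small illustration with placeholder file names:

from braceexpand import braceexpand

files = list(braceexpand("data/shard-{0000..0007}.jsonl"))   # 8 shard names
process_index, process_count = 1, 4                          # pretend we are host 1 of 4
my_files = files[process_index::process_count]               # shards 0001 and 0005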
===========unchanged ref 0===========
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
blank_caption_prob: float = 0.0
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = self.train_dataset.shuffle(5000, self.seed_dataset)
self.train_dataset = (
self.train_dataset.map(partial_blank_caption_function)
if self.streaming
else self.train_dataset.map(
partial_blank_caption_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=False,
desc="Blanking some captions",
)
)
at: datasets.arrow_dataset.Dataset
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 1===========
at: datasets.load
load_dataset(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, split: Optional[Union[str, Split]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[DownloadMode]=None, ignore_verifications: bool=False, keep_in_memory: Optional[bool]=None, save_infos: bool=False, revision: Optional[Union[str, Version]]=None, use_auth_token: Optional[Union[bool, str]]=None, task: Optional[Union[str, TaskTemplate]]=None, streaming: bool=False, num_proc: Optional[int]=None, *, num_process: int=1, process_id: int=0, seed: Optional[int]=None, experiment_id: Optional[str]=None, max_concurrent_cache_files: int=10000, timeout: Union[int, float]=100, base_path: Optional[str]=None, info: Optional[DatasetInfo]=None, repo_id: Optional[str]=None, **kwargs) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
+ blank_caption_prob: float = 0.0
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
===========changed ref 1===========
+ # module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+
+
===========changed ref 2===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+
+
===========changed ref 3===========
# module: tools.train.train
+ cc.initialize_cache("./jax_cache", max_cache_size_bytes=10 * 2**30)
- cc.initialize_cache("./jax_cache", max_cache_size_bytes=5 * 2**30)
logger = logging.getLogger(__name__)
===========changed ref 4===========
+ # module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ def product_with_transpose(
+ mat1,
+ mat2,
+ precision=lax.Precision.DEFAULT,
+ ):
+ """Returns mat1 * mat2^T for two matrices (possibly batched).
+
+ The rows and columns are the last two dimensions for each matrix.
+
+ Args:
+ mat1: First matrix.
+ mat2: Second matrix.
+ precision: JAX precision to use for the multiplication.
+ """
+ return jnp.einsum("...ij,...kj->...ik", mat1, mat2, precision=precision)
+
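Note: the einsum above is just a (possibly batched) mat1 @ mat2.T; a quick check on concrete shapes with arbitrary values:

import jax.numpy as jnp

a = jnp.arange(6.0).reshape(2, 3)
b = jnp.ones((4, 3))
out = jnp.einsum("...ij,...kj->...ik", a, b)
assert out.shape == (2, 4)
assert jnp.allclose(out, a @ b.T)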
===========changed ref 5===========
+ # module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @struct.dataclass
+ class SlicedSymmetricMatrix:
+ """A symmetric matrix represented by lower-triangular block row slices.
+
+ For example, the symmetric matrix M = [[a, b^T], [b, c]] would be represented
+ by the block rows a and [b, c].
+
+ The matrix may be batched, in which case each entry of block_rows may have
+ dimension greater than 2. The last two dimensions represent the rows and cols.
+ """
+
+ block_rows: List[jnp.ndarray]
+
===========changed ref 6===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+ # pylint:disable=no-value-for-parameter
+ @struct.dataclass
+ class QuantizedValue:
+ @classmethod
+ def from_float_value(cls, fvalue, quantized_dtype, extract_diagonal=False):
+ if isinstance(fvalue, list) and not fvalue:
+ return QuantizedValue([], [], [], quantized_dtype, extract_diagonal, [])
+ quantized, diagonal_fvalue, bucket_size = QuantizedValue.quantize(
+ fvalue, quantized_dtype, extract_diagonal
+ )
+ return QuantizedValue(
+ quantized,
+ diagonal_fvalue,
+ bucket_size,
+ quantized_dtype,
+ extract_diagonal,
+ list(quantized.shape),
+ )
+
|
dalle_mini.data/Dataset.preprocess
|
Modified
|
borisdayma~dalle-mini
|
803ccbf4021a8dcfd7e3c5192b9573b4f6a50c64
|
feat: support pod (#139)
|
<14>:<add>
<add> # blank captions
<add> if self.blank_caption_prob:
<add> partial_blank_caption_function = partial(
<add> blank_caption_function,
<add> text_column=self.text_column,
<add> blank_caption_prob=self.blank_caption_prob,
<add> )
<add> if hasattr(self, "train_dataset"):
<add> self.train_dataset = (
<add> self.train_dataset.map(partial_blank_caption_function)
<add> if self.streaming
<add> else self.train_dataset.map(
<add> partial_blank_caption_function,
<add> num_proc=self.preprocessing_num_workers,
<add> load_from_cache_file=False,
<add> desc="Blanking some captions",
<add> )
<add> )
|
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
<0> # get required config variables
<1> decoder_start_token_id = config.decoder_start_token_id
<2> normalize_text = config.normalize_text
<3> max_length = config.max_text_length
<4>
<5> if self.streaming:
<6> # we need to shuffle early in streaming mode
<7> if hasattr(self, "train_dataset"):
<8> self.train_dataset = self.train_dataset.shuffle(5000, self.seed_dataset)
<9> else:
<10> # prepare rng for later shuffling
<11> if self.seed_dataset is None:
<12> self.seed_dataset = np.random.get_state()[1][0]
<13> self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
<14>
<15> # normalize text
<16> if normalize_text:
<17> text_normalizer = TextNormalizer()
<18> partial_normalize_function = partial(
<19> normalize_function,
<20> text_column=self.text_column,
<21> text_normalizer=text_normalizer,
<22> )
<23> for ds in ["train_dataset", "eval_dataset"]:
<24> if hasattr(self, ds):
<25> setattr(
<26> self,
<27> ds,
<28> (
<29> getattr(self, ds).map(partial_normalize_function)
<30> if self.streaming
<31> else getattr(self, ds).map(
<32> partial_normalize_function,
<33> num_proc=self.preprocessing_num_workers,
<34> load_from_cache_file=not self.overwrite_cache,
<35> desc="Normalizing datasets",
<36> )
<37> ),
<38> )
<39>
<40> # preprocess
<41> partial_preprocess_function = partial(
<42> preprocess_function,
<43> tokenizer=tokenizer,
<44> text_column=self.text_column,
<45> encoding_column=self.encoding_column,
<46> max_length=max</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# offset: 1
decoder_start_token_id=decoder_start_token_id,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(
partial_preprocess_function,
batched=True,
)
if self.streaming
else getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
===========unchanged ref 0===========
at: dalle_mini.data
normalize_function(example, text_column, text_normalizer)
preprocess_function(examples, tokenizer, text_column, encoding_column, max_length, decoder_start_token_id)
blank_caption_function(example, text_column, blank_caption_prob)
at: dalle_mini.data.Dataset
streaming: bool = True
text_column: str = "caption"
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
blank_caption_prob: float = 0.0
at: dalle_mini.data.Dataset.__post_init__
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
at: dalle_mini.data.Dataset.dataloader
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
===========unchanged ref 1===========
at: datasets.arrow_dataset.Dataset
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=None, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, suffix_template: str="_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str]=None, desc: Optional[str]=None) -> "Dataset"
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
wrapper(*, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: numpy.random.mtrand
get_state = _rand.get_state
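For orientation, a hedged sketch of what the blank_caption_function referenced above presumably does, namely blanking a caption with probability blank_caption_prob; the actual implementation may differ (for example in its choice of RNG):

# Assumed behavior, not the repository's code.
import random

def blank_caption_function(example, text_column, blank_caption_prob):
    if blank_caption_prob and random.random() < blank_caption_prob:
        example[text_column] = ""
    return example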
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
+ blank_caption_prob: float = 0.0
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
===========changed ref 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
self.multi_hosts = jax.process_count() > 1
+ # feed blank captions only in streaming mode for now
+ if self.blank_caption_prob:
+ assert (
+ self.streaming is True
+ ), "blank_caption_prob can only be used in streaming mode"
# define data_files
if self.train_file is not None or self.validation_file is not None:
# accept braceexpand notation
for k in ["train_file", "validation_file"]:
f = getattr(self, k)
if isinstance(f, str):
setattr(self, k, list(braceexpand(f)))
# for list of files, split training data shards by host
if (
isinstance(self.train_file, list)
and self.multi_hosts
and self.shard_by_host
):
self.train_file = self.train_file[
jax.process_index() :: jax.process_count()
]
data_files = {
"train": self.train_file,
"validation": self.validation_file,
}
else:
data_files = None
# load dataset
dataset = load_dataset(
self.dataset_repo_or_path,
data_files=data_files,
streaming=self.streaming,
use_auth_token=self.use_auth_token,
)
if self.do_train:
if "train" not in dataset:
raise ValueError("Training requires a training dataset")
self.train_dataset = dataset["train"]
if self.max_train_samples is not None:
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
if self.do_eval:
if "validation" not</s>
===========changed ref 2===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
<s>(range(self.max_train_samples))
)
if self.do_eval:
if "validation" not in dataset:
raise ValueError("Evaluating requires a validation dataset")
self.eval_dataset = dataset["validation"]
if self.max_eval_samples is not None:
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
===========changed ref 3===========
+ # module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+
+
===========changed ref 4===========
+ # module: tools.train.scalable_shampoo.quantization_utils
+
+
|
tools.train.train/ModelArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
793987480b7f78056df1a2903562c28b1f6214d3
|
feat(data): super conditioning (#141)
|
<1>:<add> self.tokenizer_name = self.model_name_or_path
<del> self.tokenizer_name == self.model_name_or_path
|
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
<0> if self.tokenizer_name is None:
<1> self.tokenizer_name == self.model_name_or_path
<2> assert (
<3> self.tokenizer_name is not None
<4> ), "Tokenizer name or model name/path needs to be specified"
<5> if self.restore_state:
<6> assert self.model_name_or_path is not None and (
<7> "/model-" in self.model_name_or_path
<8> ), "Restoring state only available with W&B artifact reference"
<9>
|
===========unchanged ref 0===========
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path."
},
)
at: tools.train.train.ModelArguments.get_opt_state
self.restore_state = str(Path(artifact_dir) / "opt_state.msgpack")
self.restore_state = artifact.metadata["bucket_path"]
===========changed ref 0===========
# module: dalle_mini
+ __version__ = "0.0.3"
- __version__ = "0.0.2"
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ @flax.struct.dataclass
+ class SampleState:
+ cur_len: jnp.ndarray
+ sequences: jnp.ndarray
+ running_token: jnp.ndarray
+ is_sent_finished: jnp.ndarray
+ prng_key: jnp.ndarray
+ model_kwargs: Dict[str, jnp.ndarray]
+ model_kwargs_uncond: Dict[str, jnp.ndarray]
+
===========changed ref 2===========
<s> <add> length_penalty: Optional[float] = None,
+ early_stopping: Optional[bool] = None,
+ trace: bool = True,
+ params: Optional[Dict[str, jnp.ndarray]] = None,
+ condition_scale: Optional[float] = 1.0,
+ input_ids_uncond: Optional[jnp.ndarray] = None,
+ attention_mask_uncond: Optional[jnp.ndarray] = None,
+ **model_kwargs,
+ ):
+ """Edit: Allow super conditioning."""
+
+ # set init values
+ max_length = max_length if max_length is not None else self.config.max_length
+ bos_token_id = (
+ bos_token_id if bos_token_id is not None else self.config.bos_token_id
+ )
+ pad_token_id = (
+ pad_token_id if pad_token_id is not None else self.config.pad_token_id
+ )
+ eos_token_id = (
+ eos_token_id if eos_token_id is not None else self.config.eos_token_id
+ )
+ decoder_start_token_id = (
+ decoder_start_token_id
+ if decoder_start_token_id
+ else self.config.decoder_start_token_id
+ )
+ prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
+
+ if decoder_start_token_id is None and self.config.is_encoder_decoder:
+ raise ValueError(
+ "`decoder_start_token_id` has to be defined for encoder-decoder generation."
+ )
+
+ do_sample = do_sample if do_sample is not None else self.config.do_sample
+ num_beams = num_beams if num_beams is not None else self.config.num_beams
+
+ if self.config.is_encoder_</s>
===========changed ref 3===========
<s>alty: Optional[float] = None,
+ early_stopping: Optional[bool] = None,
+ trace: bool = True,
+ params: Optional[Dict[str, jnp.ndarray]] = None,
+ condition_scale: Optional[float] = 1.0,
+ input_ids_uncond: Optional[jnp.ndarray] = None,
+ attention_mask_uncond: Optional[jnp.ndarray] = None,
+ **model_kwargs,
+ ):
# offset: 1
<s> num_beams is not None else self.config.num_beams
+
+ if self.config.is_encoder_decoder:
+ # add encoder_outputs to model_kwargs
+ if model_kwargs.get("encoder_outputs") is None:
+ model_kwargs_input = dict(model_kwargs)
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
+ input_ids,
+ params,
+ {"attention_mask": attention_mask, **model_kwargs_input},
+ )
+ if condition_scale != 1.0:
+ assert (
+ input_ids_uncond is not None
+ ), "`input_ids_uncond` has to be defined for super conditioning."
+ assert (
+ do_sample is True
+ ), "`do_sample` has to be True for super conditioning."
+ assert (
+ num_beams == 1
+ ), "`num_beams` has to be 1 for super conditioning."
+ model_kwargs_uncond = (
+ self._prepare_encoder_decoder_kwargs_for_generation(
+ input_ids_uncond,
+ params,
+ {
+ "attention_mask": attention_mask_uncond,
+ **model_kwargs_input,
+ },
+ )
+ )
+ else:
+ model_kwargs_uncond = None
</s>
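The condition_scale plumbing above feeds a classifier-free-guidance style mix of conditioned and unconditioned logits at sampling time. A hedged sketch of the core combination step (the function name is an assumption):

# Push the conditioned logits away from the unconditioned ones.
def superconditioned_logits(logits_uncond, logits_cond, condition_scale):
    return logits_uncond + condition_scale * (logits_cond - logits_uncond)

# condition_scale == 1.0 recovers the ordinary conditioned logits.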
|
dalle_mini.data/Dataset.preprocess
|
Modified
|
borisdayma~dalle-mini
|
793987480b7f78056df1a2903562c28b1f6214d3
|
feat(data): super conditioning (#141)
|
<15>:<del> # blank captions
<16>:<del> if self.blank_caption_prob:
<17>:<add> # filter data
<add> partial_filter_function = partial(
<del> partial_blank_caption_function = partial(
<18>:<add> filter_function,
<del> blank_caption_function,
<19>:<add> filter_column=self.filter_column,
<del> text_column=self.text_column,
<20>:<add> filter_value=self.filter_value,
<add> clip_score_column=self.clip_score_column,
<add> min_clip_score=self.min_clip_score,
<add> max_clip_score=self.max_clip_score,
<del> blank_caption_prob=self.blank_caption_prob,
<21>:<add> )
<del> )
<22>:<add> for ds in ["train_dataset", "eval_dataset"]:
<add> if hasattr(self, ds):
<del> if hasattr(self, "train_dataset"):
<23>:<add> setattr(
<add> self,
<add> ds,
<add> (
<add> getattr(self, ds).filter(partial_filter_function)
<del> self.train_dataset = (
<24>:<del> self.train_dataset.map(partial_blank_caption_function)
<25>:<add> if self.streaming
<del> if self.streaming
<26>:<add> else getattr(self, ds).filter(
<del> else self.train_dataset.map(
<27>:<add> partial_filter_function,
<del> partial_blank_caption_function,
<28>:<add> num_proc=self.preprocessing_num_workers,
<del> num_proc=self.preprocessing_num_workers,
|
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
<0> # get required config variables
<1> decoder_start_token_id = config.decoder_start_token_id
<2> normalize_text = config.normalize_text
<3> max_length = config.max_text_length
<4>
<5> if self.streaming:
<6> # we need to shuffle early in streaming mode
<7> if hasattr(self, "train_dataset"):
<8> self.train_dataset = self.train_dataset.shuffle(5000, self.seed_dataset)
<9> else:
<10> # prepare rng for later shuffling
<11> if self.seed_dataset is None:
<12> self.seed_dataset = np.random.get_state()[1][0]
<13> self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
<14>
<15> # blank captions
<16> if self.blank_caption_prob:
<17> partial_blank_caption_function = partial(
<18> blank_caption_function,
<19> text_column=self.text_column,
<20> blank_caption_prob=self.blank_caption_prob,
<21> )
<22> if hasattr(self, "train_dataset"):
<23> self.train_dataset = (
<24> self.train_dataset.map(partial_blank_caption_function)
<25> if self.streaming
<26> else self.train_dataset.map(
<27> partial_blank_caption_function,
<28> num_proc=self.preprocessing_num_workers,
<29> load_from_cache_file=False,
<30> desc="Blanking some captions",
<31> )
<32> )
<33>
<34> # normalize text
<35> if normalize_text:
<36> text_normalizer = TextNormalizer()
<37> partial_normalize_function = partial(
<38> normalize_function,
<39> text_column=self.text_column,
<40> text_normalizer=text_normalizer,
<41> )
<42> for ds in ["train_dataset", "eval_dataset"]</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# offset: 1
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(partial_normalize_function)
if self.streaming
else getattr(self, ds).map(
partial_normalize_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Normalizing datasets",
)
),
)
# preprocess
partial_preprocess_function = partial(
preprocess_function,
tokenizer=tokenizer,
text_column=self.text_column,
encoding_column=self.encoding_column,
max_length=max_length,
decoder_start_token_id=decoder_start_token_id,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=[
self.text_column,
self.encoding_column,
],
)
if self.streaming
else getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
===========unchanged ref 0===========
at: dalle_mini.data
blank_caption_function(example, text_column, blank_caption_prob)
normalize_function(example, text_column, text_normalizer)
filter_function(example, min_clip_score, max_clip_score, clip_score_column, filter_column, filter_value)
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
blank_caption_prob: float = 0.0
clip_score_column: str = "clip_score"
min_clip_score: float = None
max_clip_score: float = None
filter_column: str = None
filter_value: str = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.__post_init__
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
===========unchanged ref 1===========
at: dalle_mini.data.Dataset.dataloader
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
at: datasets.arrow_dataset.Dataset
wrapper(*, with_indices=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, keep_in_memory: bool=False, load_from_cache_file: bool=True, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, suffix_template: str="_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str]=None, desc: Optional[str]=None)
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
wrapper(*, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
at: datasets.dataset_dict.DatasetDict
shuffle(seeds: Optional[Union[int, Dict[str, Optional[int]]]]=None, seed: Optional[int]=None, generators: Optional[Dict[str, np.random.Generator]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000) -> "DatasetDict"
===========unchanged ref 2===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: numpy.random.mtrand
get_state = _rand.get_state
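A hedged sketch of the filter_function whose signature is listed above; the assumed semantics are to keep an example only if it passes the clip-score bounds and the optional column/value filter:

# Assumed behavior, not the repository's code.
def filter_function(
    example,
    min_clip_score,
    max_clip_score,
    clip_score_column,
    filter_column,
    filter_value,
):
    if min_clip_score is not None and example[clip_score_column] < min_clip_score:
        return False
    if max_clip_score is not None and example[clip_score_column] > max_clip_score:
        return False
    if filter_column is not None and example[filter_column] != filter_value:
        return False
    return True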
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
blank_caption_prob: float = 0.0
+ clip_score_column: str = "clip_score"
+ min_clip_score: float = None
+ max_clip_score: float = None
+ filter_column: str = None
+ filter_value: str = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
===========changed ref 1===========
# module: dalle_mini
+ __version__ = "0.0.3"
- __version__ = "0.0.2"
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ @flax.struct.dataclass
+ class SampleState:
+ cur_len: jnp.ndarray
+ sequences: jnp.ndarray
+ running_token: jnp.ndarray
+ is_sent_finished: jnp.ndarray
+ prng_key: jnp.ndarray
+ model_kwargs: Dict[str, jnp.ndarray]
+ model_kwargs_uncond: Dict[str, jnp.ndarray]
+
|
dalle_mini.data/Dataset.dataloader
|
Modified
|
borisdayma~dalle-mini
|
793987480b7f78056df1a2903562c28b1f6214d3
|
feat(data): super conditioning (#141)
|
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(self, split, batch_size, epoch=None):
<0> def _dataloader_datasets_non_streaming(
<1> dataset: Dataset,
<2> rng: jax.random.PRNGKey = None,
<3> ):
<4> """
<5> Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
<6> Shuffle batches if rng is set.
<7> """
<8> steps_per_epoch = len(dataset) // batch_size
<9>
<10> if rng is not None:
<11> batch_idx = jax.random.permutation(rng, len(dataset))
<12> else:
<13> batch_idx = jnp.arange(len(dataset))
<14>
<15> batch_idx = batch_idx[
<16> : steps_per_epoch * batch_size
<17> ] # Skip incomplete batch.
<18> batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
<19>
<20> for idx in batch_idx:
<21> batch = dataset[idx]
<22> batch = {k: jnp.array(v) for k, v in batch.items()}
<23> yield batch
<24>
<25> def _dataloader_datasets_streaming(
<26> dataset: Dataset,
<27> epoch: int,
<28> ):
<29> keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"]
<30> batch = {k: [] for k in keys}
<31> first_loop = True # stop after one loop in some cases
<32> while (self.multi_hosts and split == "train") or first_loop:
<33> # in multi-host, we run forever (no epoch) as hosts need to stop
<34> # at the same time and training data may not be split equally
<35> # For validation data we put the entire batch on each host and then
<36> # keep only the one specific to each host (could be improved but not necessary)
<37> if epoch is not None:
<38> assert split == "train</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(self, split, batch_size, epoch=None):
# offset: 1
# reshuffle training data at each epoch
dataset.set_epoch(epoch)
epoch += 1
for item in dataset:
for k, v in item.items():
batch[k].append(v)
if len(batch[keys[0]]) == batch_size:
batch = {k: jnp.array(v) for k, v in batch.items()}
yield batch
batch = {k: [] for k in keys}
first_loop = False
if split == "train":
ds = self.train_dataset
elif split == "eval":
ds = self.eval_dataset
else:
raise ValueError(f'split must be "train" or "eval", got {split}')
if self.streaming:
return _dataloader_datasets_streaming(ds, epoch)
else:
if split == "train":
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
return _dataloader_datasets_non_streaming(ds, input_rng)
===========unchanged ref 0===========
at: dalle_mini.data
Dataset(dataset_repo_or_path: str, train_file: str=None, validation_file: str=None, streaming: bool=True, use_auth_token: bool=False, text_column: str="caption", encoding_column: str="encoding", max_train_samples: int=None, max_eval_samples: int=None, preprocessing_num_workers: int=None, overwrite_cache: bool=False, do_train: bool=False, do_eval: bool=True, seed_dataset: int=None, shard_by_host: bool=False, blank_caption_prob: float=0.0, clip_score_column: str="clip_score", min_clip_score: float=None, max_clip_score: float=None, filter_column: str=None, filter_value: str=None, train_dataset: Dataset=field(init=False), eval_dataset: Dataset=field(init=False), rng_dataset: jnp.ndarray=field(init=False), multi_hosts: bool=field(init=False))
at: dalle_mini.data.Dataset.preprocess
decoder_start_token_id = config.decoder_start_token_id
max_length = config.max_text_length
partial_preprocess_function = partial(
preprocess_function,
tokenizer=tokenizer,
text_column=self.text_column,
encoding_column=self.encoding_column,
max_length=max_length,
decoder_start_token_id=decoder_start_token_id,
)
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
blank_caption_prob: float = 0.0
+ clip_score_column: str = "clip_score"
+ min_clip_score: float = None
+ max_clip_score: float = None
+ filter_column: str = None
+ filter_value: str = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
===========changed ref 1===========
# module: dalle_mini
+ __version__ = "0.0.3"
- __version__ = "0.0.2"
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ @flax.struct.dataclass
+ class SampleState:
+ cur_len: jnp.ndarray
+ sequences: jnp.ndarray
+ running_token: jnp.ndarray
+ is_sent_finished: jnp.ndarray
+ prng_key: jnp.ndarray
+ model_kwargs: Dict[str, jnp.ndarray]
+ model_kwargs_uncond: Dict[str, jnp.ndarray]
+
===========changed ref 3===========
# module: tools.train.train
@dataclass
class ModelArguments:
def __post_init__(self):
if self.tokenizer_name is None:
+ self.tokenizer_name = self.model_name_or_path
- self.tokenizer_name == self.model_name_or_path
assert (
self.tokenizer_name is not None
), "Tokenizer name or model name/path needs to be specified"
if self.restore_state:
assert self.model_name_or_path is not None and (
"/model-" in self.model_name_or_path
), "Restoring state only available with W&B artifact reference"
===========changed ref 4===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# get required config variables
decoder_start_token_id = config.decoder_start_token_id
normalize_text = config.normalize_text
max_length = config.max_text_length
if self.streaming:
# we need to shuffle early in streaming mode
if hasattr(self, "train_dataset"):
self.train_dataset = self.train_dataset.shuffle(5000, self.seed_dataset)
else:
# prepare rng for later shuffling
if self.seed_dataset is None:
self.seed_dataset = np.random.get_state()[1][0]
self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
- # blank captions
- if self.blank_caption_prob:
+ # filter data
+ partial_filter_function = partial(
- partial_blank_caption_function = partial(
+ filter_function,
- blank_caption_function,
+ filter_column=self.filter_column,
- text_column=self.text_column,
+ filter_value=self.filter_value,
+ clip_score_column=self.clip_score_column,
+ min_clip_score=self.min_clip_score,
+ max_clip_score=self.max_clip_score,
- blank_caption_prob=self.blank_caption_prob,
+ )
- )
+ for ds in ["train_dataset", "eval_dataset"]:
+ if hasattr(self, ds):
- if hasattr(self, "train_dataset"):
+ setattr(
+ self,
+ ds,
+ (
+ getattr(self, ds).filter(partial_filter_function)
- self.train_dataset = (
- self.train_dataset.map(partial_blank_caption_function)
+ if self.streaming
- if self.streaming
</s>
|
|
tools.train.train/MetricsLogger.log
|
Modified
|
borisdayma~dalle-mini
|
b7b619a2ab6b9dec1812d074fbee7b5abafae158
|
feat(train): log norm and histograms (#143)
|
<1>:<add> log_metrics = {}
<del> log_metrics = {
<2>:<del> f"{prefix}/{k}" if prefix is not None else k: v
<3>:<add> for k, v in metrics.items():
<del> for k, v in metrics.items()
<4>:<add> if prefix is not None:
<add> k = f"{prefix}/{k}"
<add> if "_norm" in k:
<add> log_metrics[f"{k}/"] = unfreeze(v)
<add> elif "_hist" in k:
<add> v = jax.tree_map(lambda x: jax.device_get(x), unfreeze(v))
<add> v = jax.tree_map(
<add> lambda x: wandb.Histogram(np_histogram=x),
<add> v,
<add> is_leaf=lambda x: isinstance(x, tuple),
<add> )
<add> log_metrics[f"{k}/"] = v
<add> else:
<add> log_metrics[k] = v
<del> }
|
# module: tools.train.train
class MetricsLogger:
def log(self, metrics, prefix=None):
<0> if jax.process_index() == 0:
<1> log_metrics = {
<2> f"{prefix}/{k}" if prefix is not None else k: v
<3> for k, v in metrics.items()
<4> }
<5> wandb.log({**log_metrics, **self.state_dict})
<6>
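To show the shape a "_hist" metric is expected to have, a small hedged example: jnp.histogram returns the (counts, bin_edges) tuple that wandb.Histogram(np_histogram=...) consumes, so one histogram per pytree leaf is enough.

# Hypothetical producer of a "_hist" metric for the logger above.
import jax
import jax.numpy as jnp

def gradients_histogram(grads, bins=64):
    # One (counts, bin_edges) tuple per leaf of the gradient pytree.
    return jax.tree_map(lambda g: jnp.histogram(g, bins=bins), grads)

# metrics = {"gradients_hist": gradients_histogram(grads)}
# metrics_logger.log(metrics, prefix="train")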
|
===========unchanged ref 0===========
at: tools.train.train.MetricsLogger.__init__
self.step = step
self.time = time.perf_counter()
self.state_dict = {}
at: tools.train.train.MetricsLogger.update_state_metrics
self.state_dict = {
f'train/{k.split("_")[-1]}': getattr(state, k)
for k in ["step", "epoch", "train_time", "train_samples"]
}
new_step = int(state.step)
new_time = time.perf_counter()
===========changed ref 0===========
# module: tools.train.train
@dataclass
class TrainingArguments:
"""
Arguments pertaining to training parameters.
"""
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for</s>
===========changed ref 1===========
# module: tools.train.train
@dataclass
class TrainingArguments:
# offset: 1
<s> )
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
graft_type: str = field(
default="rmsprop_normalized",
metadata={
"help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'"
},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
</s>
===========changed ref 2===========
# module: tools.train.train
@dataclass
class TrainingArguments:
# offset: 2
<s> warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
lr_staircase: bool = field(
default=False,
metadata={
"help": "Whether to use staircase or continuous learning rate when using exponential decay."
},
)
logging_steps: int = field(
default=40, metadata={"help": "Log every X updates steps."}
)
eval_steps: int = field(
default=400, metadata={"help": "Run an evaluation every X steps."}
)
save_steps: int = field(
default=4000, metadata={"help": "Save checkpoint every X updates steps."}
)
log_model: bool = field(
default=False,
metadata={"help": "Log model to wandb at `save_steps` frequency."},
)
+ log_histograms: bool = field(
+ default=False,
+ metadata={
+ "help": "Log parameters and gradients histograms. Slows down training."
+ },
+ )
seed_model: int = field(
default=42,
metadata={
"help": "Random seed for the model</s>
===========changed ref 3===========
# module: tools.train.train
@dataclass
class TrainingArguments:
# offset: 3
<s> be set at the beginning of training."
},
)
wandb_entity: Optional[str] = field(
default=None,
metadata={"help": "The wandb entity to use (for teams)."},
)
wandb_project: str = field(
default="dalle-mini",
metadata={"help": "The name of the wandb project."},
)
wandb_job_type: str = field(
default="Seq2Seq",
metadata={"help": "The name of the wandb job type."},
)
assert_TPU_available: bool = field(
default=False,
metadata={"help": "Verify that TPU is not in use."},
)
mp_devices: Optional[int] = field(
default=1,
metadata={
"help": "Number of devices required for model parallelism. The other dimension of available devices is used for data parallelism."
},
)
dp_devices: int = field(init=False)
|
tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices/product_with_transpose
|
Modified
|
borisdayma~dalle-mini
|
b7b619a2ab6b9dec1812d074fbee7b5abafae158
|
feat(train): log norm and histograms (#143)
|
<7>:<add> axes: The axes over which to apply the product.
<9>:<add> return jnp.tensordot(a=mat1, b=mat2, axes=axes, precision=precision)
<del> return jnp.einsum("...ij,...kj->...ik", mat1, mat2, precision=precision)
|
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
def product_with_transpose(
mat1,
mat2,
+ axes,
precision=lax.Precision.DEFAULT,
):
<0> """Returns mat1 * mat2^T for two matrices (possibly batched).
<1>
<2> The rows and columns are the last two dimensions for each matrix.
<3>
<4> Args:
<5> mat1: First matrix.
<6> mat2: Second matrix.
<7> precision: JAX precision to use for the multiplication.
<8> """
<9> return jnp.einsum("...ij,...kj->...ik", mat1, mat2, precision=precision)
<10>
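The einsum-to-tensordot change preserves the two-dimensional behaviour; a quick hedged check of the equivalence for plain matrices:

# Contracting the last axis of each operand reproduces mat1 @ mat2.T,
# i.e. the old einsum "...ij,...kj->...ik" for 2-D inputs.
import jax.numpy as jnp

mat1 = jnp.arange(6.0).reshape(2, 3)
mat2 = jnp.arange(12.0).reshape(4, 3)
old = jnp.einsum("ij,kj->ik", mat1, mat2)
new = jnp.tensordot(mat1, mat2, axes=((-1,), (-1,)))
assert jnp.allclose(old, new)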
|
===========changed ref 0===========
# module: tools.train.train
class MetricsLogger:
def log(self, metrics, prefix=None):
if jax.process_index() == 0:
+ log_metrics = {}
- log_metrics = {
- f"{prefix}/{k}" if prefix is not None else k: v
+ for k, v in metrics.items():
- for k, v in metrics.items()
+ if prefix is not None:
+ k = f"{prefix}/{k}"
+ if "_norm" in k:
+ log_metrics[f"{k}/"] = unfreeze(v)
+ elif "_hist" in k:
+ v = jax.tree_map(lambda x: jax.device_get(x), unfreeze(v))
+ v = jax.tree_map(
+ lambda x: wandb.Histogram(np_histogram=x),
+ v,
+ is_leaf=lambda x: isinstance(x, tuple),
+ )
+ log_metrics[f"{k}/"] = v
+ else:
+ log_metrics[k] = v
- }
wandb.log({**log_metrics, **self.state_dict})
===========changed ref 1===========
# module: tools.train.train
@dataclass
class TrainingArguments:
"""
Arguments pertaining to training parameters.
"""
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for</s>
===========changed ref 2===========
# module: tools.train.train
@dataclass
class TrainingArguments:
# offset: 1
<s> )
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
graft_type: str = field(
default="rmsprop_normalized",
metadata={
"help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'"
},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
</s>
===========changed ref 3===========
# module: tools.train.train
@dataclass
class TrainingArguments:
# offset: 2
<s> warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
lr_staircase: bool = field(
default=False,
metadata={
"help": "Whether to use staircase or continuous learning rate when using exponential decay."
},
)
logging_steps: int = field(
default=40, metadata={"help": "Log every X updates steps."}
)
eval_steps: int = field(
default=400, metadata={"help": "Run an evaluation every X steps."}
)
save_steps: int = field(
default=4000, metadata={"help": "Save checkpoint every X updates steps."}
)
log_model: bool = field(
default=False,
metadata={"help": "Log model to wandb at `save_steps` frequency."},
)
+ log_histograms: bool = field(
+ default=False,
+ metadata={
+ "help": "Log parameters and gradients histograms. Slows down training."
+ },
+ )
seed_model: int = field(
default=42,
metadata={
"help": "Random seed for the model</s>
|
tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices/sliced_transposed_product
|
Modified
|
borisdayma~dalle-mini
|
b7b619a2ab6b9dec1812d074fbee7b5abafae158
|
feat(train): log norm and histograms (#143)
|
<0>:<add> """Returns the blocked slices representing a symmetric contraction.
<del> """Returns the blocked slices representing a symmetric matrix mat*mat^T.
<1>:<add>
<add> Specifically, the output is a contraction of the input mat with itself, in the
<add> specified axes.
<3>:<del> mat: The matrix for which we will compute mat*mat^T. It does not need to be
<4>:<del> square, and may be batched.
<5>:<add> mat: The matrix for which we will compute a contraction with itself.
<6>:<add> axes: Axes to use for the contraction.
<12>:<add> rank = len(mat.shape)
<add>
<add> def _make_axis_positive(ax):
<add> assert -rank <= ax < rank
<add> return ax + rank if ax < 0 else ax
<add>
<add> positive_axes = [_make_axis_positive(ax) for ax in axes]
<add> assert len(positive_axes) == len(axes)
<add> remaining_axes = set(range(rank)) - set(positive_axes)
<add> assert len(remaining_axes) == 1
<add> remaining_ax = remaining_axes.pop()
<add>
<add> num_rows = mat.shape[remaining_ax]
<del> num_rows = mat.shape[-2]
<18>:<add>
<add> block_rows =
|
<s>: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
- @functools.partial(jax.jit, static_argnames=("block_size", "precision"))
def sliced_transposed_product(
mat,
block_size,
+ axes=(-1,),
precision=lax.Precision.DEFAULT,
):
<0> """Returns the blocked slices representing a symmetric matrix mat*mat^T.
<1>
<2> Args:
<3> mat: The matrix for which we will compute mat*mat^T. It does not need to be
<4> square, and may be batched.
<5> block_size: The size of row blocks to compute.
<6> precision: The precision to use in each computation.
<7>
<8> Raises:
<9> ValueError: Raised when the specified block size does not evenly divide
<10> the number of rows of the input mat.
<11> """
<12> num_rows = mat.shape[-2]
<13> if num_rows % block_size != 0:
<14> raise ValueError(
<15> "The row dimension must be divisible by block_size. "
<16> f"Instead got row dimension={num_rows} and block_size={block_size}."
<17> )
<18> block_rows = [
<19> product_with_transpose(
<20> mat[Ellipsis, i * block_size : (i + 1) * block_size, :],
<21> mat[Ellipsis, 0 : (i + 1) * block_size, :],
<22> precision,
<23> )
<24> for i in range(num_rows // block_size)
<25> ]
<26> return SlicedSymmetricMatrix(block_rows=block_rows)
<27>
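Since the rewrite contracts over arbitrary axes and keeps a single remaining axis as the rows, the full (unblocked) result is the Gram matrix over that axis; a hedged check:

# With a rank-3 tensor and axes=(0, 2), the remaining axis 1 plays the role
# of the rows, and the full contraction equals a Gram matrix over that axis.
import jax.numpy as jnp

mat = jnp.arange(24.0).reshape(2, 4, 3)
gram = jnp.tensordot(mat, mat, axes=((0, 2), (0, 2)))   # shape (4, 4)
flat = mat.transpose(1, 0, 2).reshape(4, -1)
assert jnp.allclose(gram, flat @ flat.T)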
|
===========unchanged ref 0===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
===========changed ref 0===========
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
def product_with_transpose(
mat1,
mat2,
+ axes,
precision=lax.Precision.DEFAULT,
):
"""Returns mat1 * mat2^T for two matrices (possibly batched).
The rows and columns are the last two dimensions for each matrix.
Args:
mat1: First matrix.
mat2: Second matrix.
+ axes: The axes over which to apply the product.
precision: JAX precision to use for the multiplication.
"""
+ return jnp.tensordot(a=mat1, b=mat2, axes=axes, precision=precision)
- return jnp.einsum("...ij,...kj->...ik", mat1, mat2, precision=precision)
===========changed ref 1===========
# module: tools.train.train
class MetricsLogger:
def log(self, metrics, prefix=None):
if jax.process_index() == 0:
+ log_metrics = {}
- log_metrics = {
- f"{prefix}/{k}" if prefix is not None else k: v
+ for k, v in metrics.items():
- for k, v in metrics.items()
+ if prefix is not None:
+ k = f"{prefix}/{k}"
+ if "_norm" in k:
+ log_metrics[f"{k}/"] = unfreeze(v)
+ elif "_hist" in k:
+ v = jax.tree_map(lambda x: jax.device_get(x), unfreeze(v))
+ v = jax.tree_map(
+ lambda x: wandb.Histogram(np_histogram=x),
+ v,
+ is_leaf=lambda x: isinstance(x, tuple),
+ )
+ log_metrics[f"{k}/"] = v
+ else:
+ log_metrics[k] = v
- }
wandb.log({**log_metrics, **self.state_dict})
===========changed ref 2===========
# module: tools.train.train
@dataclass
class TrainingArguments:
"""
Arguments pertaining to training parameters.
"""
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for</s>
===========changed ref 3===========
# module: tools.train.train
@dataclass
class TrainingArguments:
# offset: 1
<s> )
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
graft_type: str = field(
default="rmsprop_normalized",
metadata={
"help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'"
},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
</s>
|
tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices/sliced_transposed_product_concat
|
Modified
|
borisdayma~dalle-mini
|
b7b619a2ab6b9dec1812d074fbee7b5abafae158
|
feat(train): log norm and histograms (#143)
|
<6>:<add> axes: Axes to use for the contraction.
<13>:<add> mat=mat, block_size=block_size, axes=axes, precision=precision
<del> mat=mat, block_size=block_size, precision=precision
|
<s>.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
- @functools.partial(jax.jit, static_argnames=("block_size", "precision"))
def sliced_transposed_product_concat(
mat,
block_size,
+ axes=(-1,),
precision=lax.Precision.DEFAULT,
):
<0> """Returns the concatenated slices representing mat*mat^T.
<1>
<2> Args:
<3> mat: The matrix for which we will compute mat*mat^T. It does not need to be
<4> square, and may be batched.
<5> block_size: The size of row blocks to compute.
<6> precision: The precision to use in each computation.
<7>
<8> Raises:
<9> ValueError: Raised when the specified block size does not evenly divide
<10> the number of rows of the input mat.
<11> """
<12> sliced_symmetric_matrix = sliced_transposed_product(
<13> mat=mat, block_size=block_size, precision=precision
<14> )
<15> return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1)
<16>
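A hedged shape note on the concatenated layout, computed by hand rather than by calling the function:

# For a 4 x 3 input with block_size = 2, the block rows have shapes (2, 2)
# and (2, 4), so the concatenation along the last axis has shape (2, 6).
import jax.numpy as jnp

G = jnp.ones((4, 3))
M = G @ G.T
rows = [M[0:2, 0:2], M[2:4, 0:4]]
concat = jnp.concatenate(rows, axis=-1)
assert concat.shape == (2, 6)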
|
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
product_with_transpose(mat1, mat2, axes, precision=lax.Precision.DEFAULT)
at: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices.sliced_transposed_product
rank = len(mat.shape)
remaining_axes = set(range(rank)) - set(positive_axes)
===========changed ref 0===========
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
def product_with_transpose(
mat1,
mat2,
+ axes,
precision=lax.Precision.DEFAULT,
):
"""Returns mat1 * mat2^T for two matrices (possibly batched).
The rows and columns are the last two dimensions for each matrix.
Args:
mat1: First matrix.
mat2: Second matrix.
+ axes: The axes over which to apply the product.
precision: JAX precision to use for the multiplication.
"""
+ return jnp.tensordot(a=mat1, b=mat2, axes=axes, precision=precision)
- return jnp.einsum("...ij,...kj->...ik", mat1, mat2, precision=precision)
===========changed ref 1===========
<s>: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
- @functools.partial(jax.jit, static_argnames=("block_size", "precision"))
def sliced_transposed_product(
mat,
block_size,
+ axes=(-1,),
precision=lax.Precision.DEFAULT,
):
+ """Returns the blocked slices representing a symmetric contraction.
- """Returns the blocked slices representing a symmetric matrix mat*mat^T.
+
+ Specifically, the output is a contraction of the input mat with itself, in the
+ specified axes.
Args:
- mat: The matrix for which we will compute mat*mat^T. It does not need to be
- square, and may be batched.
+ mat: The matrix for which we will compute a contraction with itself.
block_size: The size of row blocks to compute.
+ axes: Axes to use for the contraction.
precision: The precision to use in each computation.
Raises:
ValueError: Raised when the specified block size does not evenly divide
the number of rows of the input mat.
"""
+ rank = len(mat.shape)
+
+ def _make_axis_positive(ax):
+ assert -rank <= ax < rank
+ return ax + rank if ax < 0 else ax
+
+ positive_axes = [_make_axis_positive(ax) for ax in axes]
+ assert len(positive_axes) == len(axes)
+ remaining_axes = set(range(rank)) - set(positive_axes)
+ assert len(remaining_axes) == 1
+ remaining_ax = remaining_axes.pop()
+
+ num_rows = mat.shape[remaining_ax]
- num_rows = mat.shape[-2]
if num_rows % block_size != 0:
raise ValueError(
"The row dimension must be div</s>
===========changed ref 2===========
<s>scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
- @functools.partial(jax.jit, static_argnames=("block_size", "precision"))
def sliced_transposed_product(
mat,
block_size,
+ axes=(-1,),
precision=lax.Precision.DEFAULT,
):
# offset: 1
<s>[-2]
if num_rows % block_size != 0:
raise ValueError(
"The row dimension must be divisible by block_size. "
f"Instead got row dimension={num_rows} and block_size={block_size}."
)
+
+ block_rows = []
- block_rows = [
+ for i in range(num_rows // block_size):
+ start_indices = [0] * rank
+ start_indices[remaining_ax] = i * block_size
+
+ slice_sizes = list(mat.shape)
+ slice_sizes[remaining_ax] = block_size
+
+ slice_sizes_full = list(mat.shape)
+ slice_sizes_full[remaining_ax] = (i + 1) * block_size
+
+ block_rows.append(
+ product_with_transpose(
- product_with_transpose(
+ lax.dynamic_slice(
+ mat, start_indices=start_indices, slice_sizes=slice_sizes
+ ),
+ lax.dynamic_slice(
+ mat, start_indices=[0] * rank, slice_sizes=slice_sizes_full
+ ),
+ axes=(axes, axes),
- mat[Ellipsis, i * block_size : (i + 1) * block_size, :],
- mat[Ellipsis, 0 : (i + 1)</s>
===========changed ref 3===========
<s>scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
- @functools.partial(jax.jit, static_argnames=("block_size", "precision"))
def sliced_transposed_product(
mat,
block_size,
+ axes=(-1,),
precision=lax.Precision.DEFAULT,
):
# offset: 2
<s>_size, :],
+ precision=precision,
- precision,
+ )
)
- for i in range(num_rows // block_size)
- ]
+
return SlicedSymmetricMatrix(block_rows=block_rows)
===========changed ref 4===========
# module: tools.train.train
class MetricsLogger:
def log(self, metrics, prefix=None):
if jax.process_index() == 0:
+ log_metrics = {}
- log_metrics = {
- f"{prefix}/{k}" if prefix is not None else k: v
+ for k, v in metrics.items():
- for k, v in metrics.items()
+ if prefix is not None:
+ k = f"{prefix}/{k}"
+ if "_norm" in k:
+ log_metrics[f"{k}/"] = unfreeze(v)
+ elif "_hist" in k:
+ v = jax.tree_map(lambda x: jax.device_get(x), unfreeze(v))
+ v = jax.tree_map(
+ lambda x: wandb.Histogram(np_histogram=x),
+ v,
+ is_leaf=lambda x: isinstance(x, tuple),
+ )
+ log_metrics[f"{k}/"] = v
+ else:
+ log_metrics[k] = v
- }
wandb.log({**log_metrics, **self.state_dict})
|
tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices/update_sliced_rows
|
Modified
|
borisdayma~dalle-mini
|
b7b619a2ab6b9dec1812d074fbee7b5abafae158
|
feat(train): log norm and histograms (#143)
|
<11>:<add> axes: Axes to use for the contraction of the update.
<16>:<add> sym_prod = sliced_transposed_product(mat=mat, block_size=block_size, axes=axes)
<del> sym_prod = sliced_transposed_product(mat=mat, block_size=block_size)
|
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("alpha", "beta", "axes"))
- @functools.partial(jax.jit, static_argnames=("alpha", "beta"))
def update_sliced_rows(
symmetric_matrix,
mat,
alpha,
beta,
+ axes=(-1,),
):
<0> """Implements the blocked equivalent of SYRK.
<1>
<2> Specifically, the symmetric matrix (represented using lower-triangular block
<3> rows) is updated using the sliced product of mat.
<4>
<5> Args:
<6> symmetric_matrix: The symmetric matrix to update.
<7> mat: The matrix to use for the update = mat * mat^T. The number of rows
<8> should match that of symmetric_matrix.
<9> alpha: The weight for the update.
<10> beta: The weight for the original symmetric matrix.
<11>
<12> Returns:
<13> The updated rows of alpha * mat * mat^T + beta * symmetric_matrix.
<14> """
<15> block_size = symmetric_matrix.block_rows[0].shape[-2]
<16> sym_prod = sliced_transposed_product(mat=mat, block_size=block_size)
<17> return SlicedSymmetricMatrix(
<18> block_rows=[
<19> update * alpha + row * beta
<20> for update, row in zip(sym_prod.block_rows, symmetric_matrix.block_rows)
<21> ]
<22> )
<23>
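For the unblocked case the update above is the classic rank-k (SYRK-style) update; a hedged numeric illustration:

# Unblocked equivalent of what update_sliced_rows computes block row by block row.
import jax.numpy as jnp

S = jnp.eye(4)                 # previous symmetric statistics
G = jnp.ones((4, 3))           # new matrix contributing G @ G.T
alpha, beta = 0.1, 0.9
updated = alpha * (G @ G.T) + beta * S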
|
===========unchanged ref 0===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
block_rows = [
block_rows_concat[
Ellipsis,
(k * (k + 1))
// 2
* block_size : (((k + 1) * (k + 2)) // 2 + 1)
* block_size,
]
for k in range(num_blocks)
]
at: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices.materialize_matrix
blocks = [
[
block_row[Ellipsis, i * block_size : (i + 1) * block_size]
for i in range(k + 1)
]
for k, block_row in enumerate(block_rows)
]
off_diags = [[] for _ in range(num_blocks - 1)]
===========changed ref 0===========
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
def product_with_transpose(
mat1,
mat2,
+ axes,
precision=lax.Precision.DEFAULT,
):
"""Returns mat1 * mat2^T for two matrices (possibly batched).
The rows and columns are the last two dimensions for each matrix.
Args:
mat1: First matrix.
mat2: Second matrix.
+ axes: The axes over which to apply the product.
precision: JAX precision to use for the multiplication.
"""
+ return jnp.tensordot(a=mat1, b=mat2, axes=axes, precision=precision)
- return jnp.einsum("...ij,...kj->...ik", mat1, mat2, precision=precision)
===========changed ref 1===========
<s>.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
- @functools.partial(jax.jit, static_argnames=("block_size", "precision"))
def sliced_transposed_product_concat(
mat,
block_size,
+ axes=(-1,),
precision=lax.Precision.DEFAULT,
):
"""Returns the concatenated slices representing mat*mat^T.
Args:
mat: The matrix for which we will compute mat*mat^T. It does not need to be
square, and may be batched.
block_size: The size of row blocks to compute.
+ axes: Axes to use for the contraction.
precision: The precision to use in each computation.
Raises:
ValueError: Raised when the specified block size does not evenly divide
the number of rows of the input mat.
"""
sliced_symmetric_matrix = sliced_transposed_product(
+ mat=mat, block_size=block_size, axes=axes, precision=precision
- mat=mat, block_size=block_size, precision=precision
)
return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1)
===========changed ref 2===========
<s>: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
- @functools.partial(jax.jit, static_argnames=("block_size", "precision"))
def sliced_transposed_product(
mat,
block_size,
+ axes=(-1,),
precision=lax.Precision.DEFAULT,
):
+ """Returns the blocked slices representing a symmetric contraction.
- """Returns the blocked slices representing a symmetric matrix mat*mat^T.
+
+ Specifically, the output is a contraction of the input mat with itself, in the
+ specified axes.
Args:
- mat: The matrix for which we will compute mat*mat^T. It does not need to be
- square, and may be batched.
+ mat: The matrix for which we will compute a contraction with itself.
block_size: The size of row blocks to compute.
+ axes: Axes to use for the contraction.
precision: The precision to use in each computation.
Raises:
ValueError: Raised when the specified block size does not evenly divide
the number of rows of the input mat.
"""
+ rank = len(mat.shape)
+
+ def _make_axis_positive(ax):
+ assert -rank <= ax < rank
+ return ax + rank if ax < 0 else ax
+
+ positive_axes = [_make_axis_positive(ax) for ax in axes]
+ assert len(positive_axes) == len(axes)
+ remaining_axes = set(range(rank)) - set(positive_axes)
+ assert len(remaining_axes) == 1
+ remaining_ax = remaining_axes.pop()
+
+ num_rows = mat.shape[remaining_ax]
- num_rows = mat.shape[-2]
if num_rows % block_size != 0:
raise ValueError(
"The row dimension must be div</s>
===========changed ref 3===========
<s>scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
- @functools.partial(jax.jit, static_argnames=("block_size", "precision"))
def sliced_transposed_product(
mat,
block_size,
+ axes=(-1,),
precision=lax.Precision.DEFAULT,
):
# offset: 1
<s>[-2]
if num_rows % block_size != 0:
raise ValueError(
"The row dimension must be divisible by block_size. "
f"Instead got row dimension={num_rows} and block_size={block_size}."
)
+
+ block_rows = []
- block_rows = [
+ for i in range(num_rows // block_size):
+ start_indices = [0] * rank
+ start_indices[remaining_ax] = i * block_size
+
+ slice_sizes = list(mat.shape)
+ slice_sizes[remaining_ax] = block_size
+
+ slice_sizes_full = list(mat.shape)
+ slice_sizes_full[remaining_ax] = (i + 1) * block_size
+
+ block_rows.append(
+ product_with_transpose(
- product_with_transpose(
+ lax.dynamic_slice(
+ mat, start_indices=start_indices, slice_sizes=slice_sizes
+ ),
+ lax.dynamic_slice(
+ mat, start_indices=[0] * rank, slice_sizes=slice_sizes_full
+ ),
+ axes=(axes, axes),
- mat[Ellipsis, i * block_size : (i + 1) * block_size, :],
- mat[Ellipsis, 0 : (i + 1)</s>
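A small shape sketch of the blocked output described above, assuming sliced_transposed_product is in scope and the default axes=(-1,); block row k covers the first (k+1) column blocks of the symmetric contraction (sizes are only an example):

    import jax.numpy as jnp
    mat = jnp.ones((4, 3))                         # 4 rows split into blocks of 2
    result = sliced_transposed_product(mat, block_size=2)
    print([r.shape for r in result.block_rows])    # expected: [(2, 2), (2, 4)]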
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<0>:<add> # text normalizer
<1>:<add>
<add> # transformer variants
<add> self.head_scale = head_scale # per Normformer
<add> assert ln_type in [
<add> "rmsnorm",
<add> "layernorm",
<add> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<add> self.ln_type = ln_type
<add> assert ln_positions in [
<add> "normformer",
<add> "swinv2",
<add> "deepnet",
<add> ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
<add> self.ln_positions = ln_positions
<add> self.use_cosine_attention = use_cosine_attention
<add> self.tau_init = tau_init
<add> self.use_deepnet_scaling = use_deepnet_scaling
<add> self.use_glu = use_glu
<add>
<add> # common parameters
<17>:<del> self.encoder_layerdrop = encoder_layerdrop
<18>:<del> self.decoder_layerdrop = decoder_layerdrop
<19>:<del> self.classifier_dropout = classifier_dropout
|
<s> ln_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
+ use_cosine_attention=False, # used in Swin v2
+ tau_init=0.05, # used only in cosine attention (Swin v2)
+ use_deepnet_scaling=False, # used in Deepnet
+ use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
<0> self.normalize_text = normalize_text
<1> self.encoder_vocab_size = encoder_vocab_size
<2> self.image_vocab_size = image_vocab_size
<3> self.image_length = image_length
<4> self.max_text_length = max_text_length
<5> self.d_model = d_model
<6> self.encoder_ffn_dim = encoder_ffn_dim
<7> self.encoder_layers = encoder_layers
<8> self.encoder_attention_heads = encoder_attention_heads
<9> self.decoder_ffn_dim = decoder_ffn_dim
<10> self.decoder_layers = decoder_layers
<11> self.decoder_attention_heads = decoder_attention_heads
<12> self.dropout = dropout
<13> self.attention_dropout = attention_dropout
<14> self.activation_dropout = activation_dropout
<15> self.activation_function = activation_function
<16> self.init_std = init_std
<17> self.encoder_layerdrop = encoder_layerdrop
<18> self.decoder_layerdrop = decoder_layerdrop
<19> self.classifier_dropout = classifier_dropout
<20> self.use_cache = use_cache
<21> self.gradient_checkpointing = gradient_checkpointing
<22> self.scale_embedding = (
<23> scale_embedding # scale factor will be sqrt(d_model) if True
<24> )
<25>
<26> # special token id's are appended to vocab if not provided
<27> decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
<28> bos_</s>
|
===========below chunk 0===========
<s>net", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
+ use_cosine_attention=False, # used in Swin v2
+ tau_init=0.05, # used only in cosine attention (Swin v2)
+ use_deepnet_scaling=False, # used in Deepnet
+ use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
# offset: 1
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_</s>
===========below chunk 1===========
<s>net", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
+ use_cosine_attention=False, # used in Swin v2
+ tau_init=0.05, # used only in cosine attention (Swin v2)
+ use_deepnet_scaling=False, # used in Deepnet
+ use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
# offset: 2
<s>.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
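For reference, use_cosine_attention and tau_init refer to the Swin v2 style attention, where the scaled dot product is replaced by a cosine similarity divided by a learnable temperature tau. A hedged sketch of the logits only (relative-position bias terms from the paper are omitted; this is not the exact implementation in modeling.py):

    import jax.numpy as jnp

    def cosine_attention_logits(q, k, tau):
        # q, k: (..., seq, head_dim); tau: learnable scalar, initialized from tau_init
        q = q / (jnp.linalg.norm(q, axis=-1, keepdims=True) + 1e-8)
        k = k / (jnp.linalg.norm(k, axis=-1, keepdims=True) + 1e-8)
        return jnp.einsum("...qd,...kd->...qk", q, k) / tau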
===========unchanged ref 0===========
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
|
dalle_mini.model.utils/PretrainedFromWandbMixin.from_pretrained
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<1>:<add> Initializes from a wandb artifact or delegates loading to the superclass.
<del> Initializes from a wandb artifact, google bucket path or delegates loading to the superclass.
<4>:<add> if ":" in pretrained_model_name_or_path and not os.path.isdir(
<del> if (
<5>:<add> pretrained_model_name_or_path
<del> ":" in pretrained_model_name_or_path
<6>:<del> and not os.path.isdir(pretrained_model_name_or_path)
<7>:<del> and not pretrained_model_name_or_path.startswith("gs")
|
# module: dalle_mini.model.utils
class PretrainedFromWandbMixin:
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
<0> """
<1> Initializes from a wandb artifact, google bucket path or delegates loading to the superclass.
<2> """
<3> with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
<4> if (
<5> ":" in pretrained_model_name_or_path
<6> and not os.path.isdir(pretrained_model_name_or_path)
<7> and not pretrained_model_name_or_path.startswith("gs")
<8> ):
<9> # wandb artifact
<10> if wandb.run is not None:
<11> artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
<12> else:
<13> artifact = wandb.Api().artifact(pretrained_model_name_or_path)
<14> pretrained_model_name_or_path = artifact.download(tmp_dir)
<15>
<16> return super(PretrainedFromWandbMixin, cls).from_pretrained(
<17> pretrained_model_name_or_path, *model_args, **kwargs
<18> )
<19>
|
===========unchanged ref 0===========
at: dalle_mini.model.utils
PretrainedFromWandbMixin()
at: os.path
isdir(s: AnyPath) -> bool
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
===========unchanged ref 1===========
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
download(root=None, recursive=False)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
===========unchanged ref 2===========
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
===========unchanged ref 3===========
use_artifact(self, artifact_or_name: Union[str, public.Artifact, Artifact], type: Optional[str]=None, aliases: Optional[List[str]]=None, use_as: Optional[str]=None) -> Union[public.Artifact, Artifact]
===========changed ref 0===========
<s> ln_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
+ use_cosine_attention=False, # used in Swin v2
+ tau_init=0.05, # used only in cosine attention (Swin v2)
+ use_deepnet_scaling=False, # used in Deepnet
+ use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
+ # text normalizer
self.normalize_text = normalize_text
+
+ # transformer variants
+ self.head_scale = head_scale # per Normformer
+ assert ln_type in [
+ "rmsnorm",
+ "layernorm",
+ ], "ln_type must be 'rmsnorm' or 'layernorm'"
+ self.ln_type = ln_type
+ assert ln_positions in [
+ "normformer",
+ "swinv2",
+ "deepnet",
+ ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
+ self.ln_positions = ln_positions
+ self.use_cosine_attention = use_cosine_attention
+ self.tau_init = tau_init
+ self.use_deepnet_scaling = use_deepnet_scaling
+ self.use_glu = use_glu
+
+ # common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder</s>
|
dalle_mini.model.modeling/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<12>:<del> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<15>:<add> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<add> self.config
<add> )
<del> self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
<16>:<del> self.out_proj = dense()
<18>:<add> self.q_proj = dense(
<add> kernel_init=deepnet_init()
<add> if self.config.use_deepnet_scaling
<add> else jax.nn.initializers.normal(self.config.init_std)
<add> )
<add> self.k_proj = dense(
<add> kernel_init=deepnet_init()
<add> if self.config.use_deepnet_scaling
<add> else jax.nn.initializers.normal(self.config.init_std)
<add> )
<add> self.v_proj = dense(
<add> kernel_init=deepnet_init(gain)
<add> if self.config.use_deepnet_scaling
<add> else jax.nn.initializers.normal(self.config.init_std)
<add> )
<add> self.out_proj = dense
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> if self.head_dim * self.num_heads != self.embed_dim:
<2> raise ValueError(
<3> f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
<4> f" and `num_heads`: {self.num_heads})."
<5> )
<6>
<7> dense = partial(
<8> nn.Dense,
<9> self.embed_dim,
<10> use_bias=self.bias,
<11> dtype=self.dtype,
<12> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<13> )
<14>
<15> self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
<16> self.out_proj = dense()
<17>
<18> self.dropout_layer = nn.Dropout(rate=self.dropout)
<19>
<20> if self.causal:
<21> # used only in decoder
<22> self.causal_mask = make_causal_mask(
<23> jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
<24> )
<25>
|
===========unchanged ref 0===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self) -> None
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
+ - scale attention heads per NormFormer paper
"""
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ # deepnet initialization
+ def deepnet_init(gain=1):
+ init = jax.nn.initializers.glorot_normal()
+
+ def _init(*args, **kwargs):
+ return gain * init(*args, **kwargs)
+
+ return _init
+
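A minimal usage sketch of the initializer above (the gain value normally comes from the deepnet_gain table referenced elsewhere in this module; the shape below is only an example):

    import jax
    import jax.numpy as jnp
    key = jax.random.PRNGKey(0)
    init = deepnet_init(gain=2.0)                # glorot_normal scaled by a DeepNet gain
    w = init(key, (512, 2048), jnp.float32)      # same call signature as other flax initializers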
===========changed ref 2===========
# module: dalle_mini.model.utils
- def copy_blobs(source_path, dest_path):
- assert source_path.startswith("gs://")
- from google.cloud import storage
-
- bucket_path = Path(source_path[5:])
- bucket, dir_path = str(bucket_path).split("/", 1)
- client = storage.Client()
- bucket = client.bucket(bucket)
- blobs = client.list_blobs(bucket, prefix=f"{dir_path}/")
- for blob in blobs:
- dest_name = str(Path(dest_path) / Path(blob.name).name)
- blob.download_to_filename(dest_name)
-
===========changed ref 3===========
# module: dalle_mini.model.utils
class PretrainedFromWandbMixin:
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
"""
+ Initializes from a wandb artifact or delegates loading to the superclass.
- Initializes from a wandb artifact, google bucket path or delegates loading to the superclass.
"""
with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
+ if ":" in pretrained_model_name_or_path and not os.path.isdir(
- if (
+ pretrained_model_name_or_path
- ":" in pretrained_model_name_or_path
- and not os.path.isdir(pretrained_model_name_or_path)
- and not pretrained_model_name_or_path.startswith("gs")
):
# wandb artifact
if wandb.run is not None:
artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
else:
artifact = wandb.Api().artifact(pretrained_model_name_or_path)
pretrained_model_name_or_path = artifact.download(tmp_dir)
return super(PretrainedFromWandbMixin, cls).from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
===========changed ref 4===========
<s> ln_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
+ use_cosine_attention=False, # used in Swin v2
+ tau_init=0.05, # used only in cosine attention (Swin v2)
+ use_deepnet_scaling=False, # used in Deepnet
+ use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
+ # text normalizer
self.normalize_text = normalize_text
+
+ # transformer variants
+ self.head_scale = head_scale # per Normformer
+ assert ln_type in [
+ "rmsnorm",
+ "layernorm",
+ ], "ln_type must be 'rmsnorm' or 'layernorm'"
+ self.ln_type = ln_type
+ assert ln_positions in [
+ "normformer",
+ "swinv2",
+ "deepnet",
+ ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
+ self.ln_positions = ln_positions
+ self.use_cosine_attention = use_cosine_attention
+ self.tau_init = tau_init
+ self.use_deepnet_scaling = use_deepnet_scaling
+ self.use_glu = use_glu
+
+ # common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder</s>
===========changed ref 5===========
<s>net", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
+ use_cosine_attention=False, # used in Swin v2
+ tau_init=0.05, # used only in cosine attention (Swin v2)
+ use_deepnet_scaling=False, # used in Deepnet
+ use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
# offset: 1
<s>dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
- self.encoder_layerdrop = encoder_layerdrop
- self.decoder_layerdrop = decoder_layerdrop
- self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length</s>
|
dalle_mini.model.modeling/FlaxBartEncoder.setup
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<12>:<add> embedding_init=deepnet_init()
<add> if self.config.use_deepnet_scaling
<add> else jax.nn.initializers.normal(self.config.init_std),
<del> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<15>:<add> self.layernorm_embedding = norm(
<add> self.config.ln_type, dtype=self.dtype, epsilon=1e-05
<add> )
<del> self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
|
# module: dalle_mini.model.modeling
class FlaxBartEncoder(FlaxBartEncoder):
def setup(self):
<0> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<1>
<2> embed_dim = self.config.d_model
<3> self.padding_idx = self.config.pad_token_id
<4> self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
<5>
<6> # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
<7> # and adjust num_embeddings appropriately. Other models don't have this hack
<8> self.offset = 0
<9> self.embed_positions = nn.Embed(
<10> self.config.max_text_length + self.offset,
<11> embed_dim,
<12> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<13> )
<14> self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
<15> self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
<16>
|
===========unchanged ref 0===========
at: math
sqrt(x: SupportsFloat, /) -> float
at: transformers.configuration_utils.PretrainedConfig.__init__
self.pad_token_id = kwargs.pop("pad_token_id", None)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.dropout = dropout
self.init_std = init_std
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
at: transformers.models.bart.modeling_flax_bart.FlaxBartEncoder
config: BartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartEncoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartDecoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(nn.Module):
- class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
"""
Edits:
- no bias
+ - use custom FlaxBartAttention
- - uses custom FlaxBartAttention
"""
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ def norm(type, *args, **kwargs):
+ if type == "rmsnorm":
+ return RMSNorm(*args, **kwargs)
+ elif type == "layernorm":
+ return nn.LayerNorm(*args, **kwargs)
+ else:
+ raise ValueError(f"Unknown norm type {type}")
+
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartDecoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartDecoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.decoder_layers)
- ]
- self.layerdrop = self.config.decoder_layerdrop
-
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ def _compute_rms_sq(self, x, axes):
+ x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x)))
+ rms_sq = jnp.mean(jax.lax.square(x), axes)
+ return rms_sq
+
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartEncoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartEncoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.encoder_layers)
- ]
- self.layerdrop = self.config.encoder_layerdrop
-
===========changed ref 7===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
+ - scale attention heads per NormFormer paper
"""
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ # deepnet initialization
+ def deepnet_init(gain=1):
+ init = jax.nn.initializers.glorot_normal()
+
+ def _init(*args, **kwargs):
+ return gain * init(*args, **kwargs)
+
+ return _init
+
===========changed ref 9===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ @nn.compact
+ def __call__(self, x):
+ reduction_axes = (-1,)
+ feature_axes = (-1,)
+
+ rms_sq = self._compute_rms_sq(x, reduction_axes)
+
+ return self._normalize(
+ self,
+ x,
+ rms_sq,
+ reduction_axes,
+ feature_axes,
+ self.dtype,
+ self.param_dtype,
+ self.epsilon,
+ self.use_scale,
+ self.scale_init,
+ )
+
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ """
+ From "Root Mean Square Layer Normalization" by https://arxiv.org/abs/1910.07467
+
+ Adapted from flax.linen.LayerNorm
+ """
+
+ epsilon: float = 1e-6
+ dtype: Any = jnp.float32
+ param_dtype: Any = jnp.float32
+ use_scale: bool = True
+ scale_init: Any = jax.nn.initializers.ones
+
===========changed ref 11===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ def _normalize(
+ self,
+ mdl,
+ x,
+ rms_sq,
+ reduction_axes,
+ feature_axes,
+ dtype,
+ param_dtype,
+ epsilon,
+ use_scale,
+ scale_init,
+ ):
+ reduction_axes = nn.normalization._canonicalize_axes(x.ndim, reduction_axes)
+ feature_axes = nn.normalization._canonicalize_axes(x.ndim, feature_axes)
+ stats_shape = list(x.shape)
+ for axis in reduction_axes:
+ stats_shape[axis] = 1
+ rms_sq = rms_sq.reshape(stats_shape)
+ feature_shape = [1] * x.ndim
+ reduced_feature_shape = []
+ for ax in feature_axes:
+ feature_shape[ax] = x.shape[ax]
+ reduced_feature_shape.append(x.shape[ax])
+ mul = lax.rsqrt(rms_sq + epsilon)
+ if use_scale:
+ scale = mdl.param(
+ "scale", scale_init, reduced_feature_shape, param_dtype
+ ).reshape(feature_shape)
+ mul *= scale
+ y = mul * x
+ return jnp.asarray(y, dtype)
+
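A numeric sketch of what the RMSNorm above computes, assuming use_scale with the default ones-initialized scale (i.e. scale == 1):

    import jax.numpy as jnp
    x = jnp.array([[1.0, 2.0, 3.0]])
    eps = 1e-6
    rms_sq = jnp.mean(jnp.square(x), axis=-1, keepdims=True)
    y = x / jnp.sqrt(rms_sq + eps)               # matches RMSNorm(x) when scale == 1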
|
dalle_mini.model.modeling/FlaxBartDecoder.setup
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<14>:<add> embedding_init=deepnet_init()
<add> if self.config.use_deepnet_scaling
<add> else jax.nn.initializers.normal(self.config.init_std),
<del> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<18>:<add> self.layernorm_embedding = norm(
<add> self.config.ln_type, dtype=self.dtype, epsilon=1e-05
<add> )
<del> self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
|
# module: dalle_mini.model.modeling
class FlaxBartDecoder(FlaxBartDecoder):
def setup(self):
<0> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<1>
<2> embed_dim = self.config.d_model
<3> self.padding_idx = self.config.pad_token_id
<4> self.embed_scale = (
<5> math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
<6> )
<7>
<8> # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
<9> # and adjust num_embeddings appropriately. Other models don't have this hack
<10> self.offset = 0
<11> self.embed_positions = nn.Embed(
<12> self.config.image_length + self.offset, # image length for BOS
<13> embed_dim,
<14> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<15> )
<16>
<17> self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
<18> self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
<19>
|
===========unchanged ref 0===========
at: math
sqrt(x: SupportsFloat, /) -> float
at: transformers.configuration_utils.PretrainedConfig.__init__
self.pad_token_id = kwargs.pop("pad_token_id", None)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.dropout = dropout
self.init_std = init_std
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
at: transformers.models.bart.modeling_flax_bart.FlaxBartDecoder
config: BartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartDecoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(nn.Module):
- class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
"""
Edits:
- no bias
+ - use custom FlaxBartAttention
- - uses custom FlaxBartAttention
"""
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ def norm(type, *args, **kwargs):
+ if type == "rmsnorm":
+ return RMSNorm(*args, **kwargs)
+ elif type == "layernorm":
+ return nn.LayerNorm(*args, **kwargs)
+ else:
+ raise ValueError(f"Unknown norm type {type}")
+
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartDecoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartDecoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.decoder_layers)
- ]
- self.layerdrop = self.config.decoder_layerdrop
-
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartEncoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ def _compute_rms_sq(self, x, axes):
+ x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x)))
+ rms_sq = jnp.mean(jax.lax.square(x), axes)
+ return rms_sq
+
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartEncoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartEncoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.encoder_layers)
- ]
- self.layerdrop = self.config.encoder_layerdrop
-
===========changed ref 7===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
+ - scale attention heads per NormFormer paper
"""
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ # deepnet initialization
+ def deepnet_init(gain=1):
+ init = jax.nn.initializers.glorot_normal()
+
+ def _init(*args, **kwargs):
+ return gain * init(*args, **kwargs)
+
+ return _init
+
===========changed ref 9===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ @nn.compact
+ def __call__(self, x):
+ reduction_axes = (-1,)
+ feature_axes = (-1,)
+
+ rms_sq = self._compute_rms_sq(x, reduction_axes)
+
+ return self._normalize(
+ self,
+ x,
+ rms_sq,
+ reduction_axes,
+ feature_axes,
+ self.dtype,
+ self.param_dtype,
+ self.epsilon,
+ self.use_scale,
+ self.scale_init,
+ )
+
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ """
+ From "Root Mean Square Layer Normalization" by https://arxiv.org/abs/1910.07467
+
+ Adapted from flax.linen.LayerNorm
+ """
+
+ epsilon: float = 1e-6
+ dtype: Any = jnp.float32
+ param_dtype: Any = jnp.float32
+ use_scale: bool = True
+ scale_init: Any = jax.nn.initializers.ones
+
===========changed ref 11===========
# module: dalle_mini.model.modeling
class FlaxBartEncoder(FlaxBartEncoder):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
self.config.max_text_length + self.offset,
embed_dim,
+ embedding_init=deepnet_init()
+ if self.config.use_deepnet_scaling
+ else jax.nn.initializers.normal(self.config.init_std),
- embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
+ self.layernorm_embedding = norm(
+ self.config.ln_type, dtype=self.dtype, epsilon=1e-05
+ )
- self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
|
dalle_mini.model.modeling/FlaxBartModule.setup
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<3>:<add> embedding_init=deepnet_init()
<add> if self.config.use_deepnet_scaling
<add> else jax.nn.initializers.normal(self.config.init_std),
<del> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<8>:<add> embedding_init=deepnet_init()
<add> if self.config.use_deepnet_scaling
<add> else jax.nn.initializers.normal(self.config.init_std),
<del> embedding_init=jax.nn.initializers.normal(self.config.init_std),
|
# module: dalle_mini.model.modeling
class FlaxBartModule(FlaxBartModule):
def setup(self):
<0> encoder_embed_tokens = nn.Embed(
<1> self.config.encoder_vocab_size,
<2> self.config.d_model,
<3> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<4> )
<5> decoder_embed_tokens = nn.Embed(
<6> self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
<7> self.config.d_model,
<8> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<9> )
<10>
<11> self.encoder = FlaxBartEncoder(
<12> self.config, dtype=self.dtype, embed_tokens=encoder_embed_tokens
<13> )
<14> self.decoder = FlaxBartDecoder(
<15> self.config, dtype=self.dtype, embed_tokens=decoder_embed_tokens
<16> )
<17>
|
===========unchanged ref 0===========
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartModule
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartDecoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(nn.Module):
- class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
"""
Edits:
- no bias
+ - use custom FlaxBartAttention
- - uses custom FlaxBartAttention
"""
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ def norm(type, *args, **kwargs):
+ if type == "rmsnorm":
+ return RMSNorm(*args, **kwargs)
+ elif type == "layernorm":
+ return nn.LayerNorm(*args, **kwargs)
+ else:
+ raise ValueError(f"Unknown norm type {type}")
+
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartDecoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartDecoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.decoder_layers)
- ]
- self.layerdrop = self.config.decoder_layerdrop
-
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartEncoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ def _compute_rms_sq(self, x, axes):
+ x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x)))
+ rms_sq = jnp.mean(jax.lax.square(x), axes)
+ return rms_sq
+
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartEncoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartEncoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.encoder_layers)
- ]
- self.layerdrop = self.config.encoder_layerdrop
-
===========changed ref 7===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
+ - scale attention heads per NormFormer paper
"""
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ # deepnet initialization
+ def deepnet_init(gain=1):
+ init = jax.nn.initializers.glorot_normal()
+
+ def _init(*args, **kwargs):
+ return gain * init(*args, **kwargs)
+
+ return _init
+
===========changed ref 9===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ @nn.compact
+ def __call__(self, x):
+ reduction_axes = (-1,)
+ feature_axes = (-1,)
+
+ rms_sq = self._compute_rms_sq(x, reduction_axes)
+
+ return self._normalize(
+ self,
+ x,
+ rms_sq,
+ reduction_axes,
+ feature_axes,
+ self.dtype,
+ self.param_dtype,
+ self.epsilon,
+ self.use_scale,
+ self.scale_init,
+ )
+
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ """
+ From "Root Mean Square Layer Normalization" by https://arxiv.org/abs/1910.07467
+
+ Adapted from flax.linen.LayerNorm
+ """
+
+ epsilon: float = 1e-6
+ dtype: Any = jnp.float32
+ param_dtype: Any = jnp.float32
+ use_scale: bool = True
+ scale_init: Any = jax.nn.initializers.ones
+
===========changed ref 11===========
# module: dalle_mini.model.modeling
class FlaxBartDecoder(FlaxBartDecoder):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.embed_scale = (
math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
)
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
self.config.image_length + self.offset, # image length for BOS
embed_dim,
+ embedding_init=deepnet_init()
+ if self.config.use_deepnet_scaling
+ else jax.nn.initializers.normal(self.config.init_std),
- embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
+ self.layernorm_embedding = norm(
+ self.config.ln_type, dtype=self.dtype, epsilon=1e-05
+ )
- self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
|
dalle_mini.model.modeling/FlaxBartForConditionalGenerationModule.setup
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<6>:<add> kernel_init=deepnet_init()
<add> if self.config.use_deepnet_scaling
<add> else jax.nn.initializers.normal(self.config.init_std),
<del> kernel_init=jax.nn.initializers.normal(self.config.init_std),
|
# module: dalle_mini.model.modeling
class FlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule):
def setup(self):
<0> self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
<1> self.lm_head = nn.Dense(
<2> self.config.image_vocab_size
<3> + 1, # image vocab size + 1 for BOS to have same size as decoder inputs (for sharding)
<4> use_bias=False,
<5> dtype=self.dtype,
<6> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<7> )
<8>
|
===========unchanged ref 0===========
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule
config: BartConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
setup(self)
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FFN(nn.Module):
+ """Simple FFN layer"""
+
+ config: DalleBartConfig
+ ffn_dim: int
+ embed_dim: int
+ dtype: jnp.dtype = jnp.float32
+ is_encoder: bool = False
+
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class GLU(nn.Module):
+ """From "GLU Variants Improve Transformer" by https://arxiv.org/abs/2002.05202"""
+
+ config: DalleBartConfig
+ ffn_dim: int
+ embed_dim: int
+ dtype: jnp.dtype = jnp.float32
+ is_encoder: bool = False
+
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartDecoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(nn.Module):
- class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
"""
Edits:
- no bias
+ - use custom FlaxBartAttention
- - uses custom FlaxBartAttention
"""
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ def norm(type, *args, **kwargs):
+ if type == "rmsnorm":
+ return RMSNorm(*args, **kwargs)
+ elif type == "layernorm":
+ return nn.LayerNorm(*args, **kwargs)
+ else:
+ raise ValueError(f"Unknown norm type {type}")
+
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartDecoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartDecoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.decoder_layers)
- ]
- self.layerdrop = self.config.decoder_layerdrop
-
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartEncoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 7===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ def _compute_rms_sq(self, x, axes):
+ x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x)))
+ rms_sq = jnp.mean(jax.lax.square(x), axes)
+ return rms_sq
+
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartEncoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartEncoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.encoder_layers)
- ]
- self.layerdrop = self.config.encoder_layerdrop
-
===========changed ref 9===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
+ - scale attention heads per NormFormer paper
"""
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ # deepnet initialization
+ def deepnet_init(gain=1):
+ init = jax.nn.initializers.glorot_normal()
+
+ def _init(*args, **kwargs):
+ return gain * init(*args, **kwargs)
+
+ return _init
+
===========changed ref 11===========
# module: dalle_mini.model.modeling
+ class FFN(nn.Module):
+ @nn.compact
+ def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
+ gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
+ self.config
+ )
+ if self.config.ln_positions in ["normformer"]:
+ x = norm(
+ self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
+ )(x)
+ x = nn.Dense(
+ self.ffn_dim,
+ dtype=self.dtype,
+ use_bias=False,
+ kernel_init=deepnet_init(gain)
+ if self.config.use_deepnet_scaling
+ else jax.nn.initializers.normal(self.config.init_std),
+ )(x)
+ x = ACT2FN[self.config.activation_function](x)
+ if self.config.ln_positions in ["normformer"]:
+ x = norm(
+ self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
+ )(x)
+ x = nn.Dropout(rate=self.config.activation_dropout)(
+ x, deterministic=deterministic
+ )
+ x = nn.Dense(
+ self.embed_dim,
+ dtype=self.dtype,
+ use_bias=False,
+ kernel_init=deepnet_init(gain)
+ if self.config.use_deepnet_scaling
+ else jax.nn.initializers.normal(self.config.init_std),
+ )(x)
+ if self.config.ln_positions in ["swinv2"]:
+ x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
+ x = nn.</s>
===========changed ref 12===========
# module: dalle_mini.model.modeling
+ class FFN(nn.Module):
+ @nn.compact
+ def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
+ x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
+ return x
+
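For contrast with the plain FFN above, a hedged sketch of the gated variant from "GLU Variants Improve Transformer" that the GLU module refers to; the exact norm/dropout placement in modeling.py may differ:

    import jax
    import jax.numpy as jnp

    def glu_ffn_sketch(x, w_gate, w_in, w_out, act=jax.nn.gelu):
        # (act(x @ w_gate) * (x @ w_in)) @ w_out  -- gated feed-forward, no biases
        return (act(x @ w_gate) * (x @ w_in)) @ w_out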
|
tools.train.train/TrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<24>:<add> if self.log_norm_steps is True:
<add> self.log_norm_steps = self.logging_steps
|
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
<0> if self.assert_TPU_available:
<1> assert (
<2> jax.local_device_count() == 8
<3> ), "TPUs in use, please check running processes"
<4> assert self.optim in [
<5> "distributed_shampoo",
<6> "adam",
<7> "adafactor",
<8> ], f"Selected optimizer not supported: {self.optim}"
<9> assert self.graft_type in [
<10> "rmsprop_normalized",
<11> "rmsprop",
<12> "adagrad",
<13> "adagrad_normalized",
<14> "sgd",
<15> "sqrt_n",
<16> ], f"Selected graft type not supported: {self.graft_type}"
<17> assert self.lr_decay in [
<18> None,
<19> "linear",
<20> "exponential",
<21> ], f"Selected learning rate decay not supported: {self.lr_decay}"
<22> if self.per_device_eval_batch_size is None:
<23> self.per_device_eval_batch_size = self.per_device_train_batch_size
<24> if (
<25> os.path.exists(self.output_dir)
<26> and os.listdir(self.output_dir)
<27> and self.do_train
<28> and not self.overwrite_output_dir
<29> ):
<30> raise ValueError(
<31> f"Output directory ({self.output_dir}) already exists and is not empty."
<32> "Use --overwrite_output_dir to overcome."
<33> )
<34> assert (
<35> self.mp_devices > 0
<36> ), f"Number of devices for model parallelism must be > 0"
<37> assert (
<38> jax.device_count() % self.mp_devices == 0
<39> ), f"Number of available devices ({jax.device_count()} must be divisible by number of devices used for model parallelism ({self.mp_devices})."
<40> self.dp_devices</s>
|
===========below chunk 0===========
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
# offset: 1
===========unchanged ref 0===========
at: os
listdir(path: bytes) -> List[bytes]
listdir(path: int) -> List[str]
listdir(path: Optional[str]=...) -> List[str]
listdir(path: _PathLike[str]) -> List[str]
at: os.path
exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool
at: tools.train.train.TrainingArguments
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
===========unchanged ref 1===========
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
graft_type: str = field(
default="rmsprop_normalized",
metadata={
"help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'"
},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
===========unchanged ref 2===========
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
lr_staircase: bool = field(
default=False,
metadata={
"help": "Whether to use staircase or continuous learning rate when using exponential decay."
},
)
logging_steps: int = field(
default=40, metadata={"help": "Log every X updates steps."}
)
eval_steps: int = field(
default=400, metadata={"help": "Run an evaluation every X steps."}
)
save_steps: int = field(
default=4000, metadata={"help": "Save checkpoint every X updates steps."}
)
log_model: bool = field(
default=False,
metadata={"help": "Log model to wandb at `save_steps` frequency."},
)
log_histograms: bool = field(
default=False,
metadata={
"help": "Log parameters and gradients histograms. Slows down training."
},
)
===========unchanged ref 3===========
seed_model: int = field(
default=42,
metadata={
"help": "Random seed for the model that will be set at the beginning of training."
},
)
wandb_entity: Optional[str] = field(
default=None,
metadata={"help": "The wandb entity to use (for teams)."},
)
wandb_project: str = field(
default="dalle-mini",
metadata={"help": "The name of the wandb project."},
)
wandb_job_type: str = field(
default="Seq2Seq",
metadata={"help": "The name of the wandb job type."},
)
assert_TPU_available: bool = field(
default=False,
metadata={"help": "Verify that TPU is not in use."},
)
mp_devices: Optional[int] = field(
default=1,
metadata={
"help": "Number of devices required for model parallelism. The other dimension of available devices is used for data parallelism."
},
)
dp_devices: int = field(init=False)
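A hedged sketch of how dp_devices (declared above with init=False) presumably follows from the divisibility assertion in __post_init__; the exact assignment is truncated in the diff:

    import jax
    mp_devices = 2                                  # example value
    assert jax.device_count() % mp_devices == 0
    dp_devices = jax.device_count() // mp_devices   # remaining axis is used for data parallelism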
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(nn.Module):
- class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
"""
Edits:
- no bias
+ - use custom FlaxBartAttention
- - uses custom FlaxBartAttention
"""
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
+ - scale attention heads per NormFormer paper
"""
|
dalle_mini.data/Dataset.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<0>:<add> if self.seed_dataset is None:
<add> # create a random seed
<add> self.seed_dataset = random.randint(0, 2**32 - 1)
|
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
<0> self.multi_hosts = jax.process_count() > 1
<1> # feed blank captions only in streaming mode for now
<2> # otherwise dataset could be cached with same blanked captions
<3> if self.blank_caption_prob:
<4> assert (
<5> self.streaming is True
<6> ), "blank_caption_prob can only be used in streaming mode"
<7> # define data_files
<8> if self.train_file is not None or self.validation_file is not None:
<9> # accept braceexpand notation
<10> for k in ["train_file", "validation_file"]:
<11> f = getattr(self, k)
<12> if isinstance(f, str):
<13> setattr(self, k, list(braceexpand(f)))
<14> # for list of files, split training data shards by host
<15> if (
<16> isinstance(self.train_file, list)
<17> and self.multi_hosts
<18> and self.shard_by_host
<19> ):
<20> self.train_file = self.train_file[
<21> jax.process_index() :: jax.process_count()
<22> ]
<23> data_files = {
<24> "train": self.train_file,
<25> "validation": self.validation_file,
<26> }
<27> else:
<28> data_files = None
<29>
<30> # load dataset
<31> dataset = load_dataset(
<32> self.dataset_repo_or_path,
<33> data_files=data_files,
<34> streaming=self.streaming,
<35> use_auth_token=self.use_auth_token,
<36> )
<37> if self.do_train:
<38> if "train" not in dataset:
<39> raise ValueError("Training requires a training dataset")
<40> self.train_dataset = dataset["train"]
<41> if self.max_train_samples is not None:
<42> self.train_dataset = (
<43> self.train_dataset.take(self.</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
if self.do_eval:
if "validation" not in dataset:
raise ValueError("Evaluating requires a validation dataset")
self.eval_dataset = dataset["validation"]
if self.max_eval_samples is not None:
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
===========unchanged ref 0===========
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
blank_caption_prob: float = 0.0
clip_score_column: str = "clip_score"
min_clip_score: float = None
max_clip_score: float = None
filter_column: str = None
filter_value: str = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = self.train_dataset.shuffle(
buffer_size=5000, seed=self.seed_dataset
)
self.train_dataset = (
self.train_dataset.map(partial_blank_caption_function)
if self.streaming
else self.train_dataset.map(
partial_blank_caption_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=False,
desc="Blanking some captions",
)
)
===========unchanged ref 1===========
at: datasets.arrow_dataset.Dataset
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
at: datasets.load
load_dataset(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, split: Optional[Union[str, Split]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[DownloadMode]=None, ignore_verifications: bool=False, keep_in_memory: Optional[bool]=None, save_infos: bool=False, revision: Optional[Union[str, Version]]=None, use_auth_token: Optional[Union[bool, str]]=None, task: Optional[Union[str, TaskTemplate]]=None, streaming: bool=False, num_proc: Optional[int]=None, *, num_process: int=1, process_id: int=0, seed: Optional[int]=None, experiment_id: Optional[str]=None, max_concurrent_cache_files: int=10000, timeout: Union[int, float]=100, base_path: Optional[str]=None, info: Optional[DatasetInfo]=None, repo_id: Optional[str]=None, **kwargs) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]
at: random
randint = _inst.randint
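For intuition, a toy example (hypothetical file pattern) of the brace expansion plus per-host shard slicing performed in __post_init__: each host keeps every Nth training shard.
from braceexpand import braceexpand

files = list(braceexpand("data/shard-{000..007}.parquet"))
# what the host with process_index 1 out of process_count 4 would keep:
host_files = files[1::4]  # ['data/shard-001.parquet', 'data/shard-005.parquet']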
===========changed ref 0===========
# module: tools.train.train
- class MetricsLogger:
- def __init__(self, step):
- self.step = step
- self.time = time.perf_counter()
- self.state_dict = {}
-
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(nn.Module):
- class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
"""
Edits:
- no bias
+ - use custom FlaxBartAttention
- - uses custom FlaxBartAttention
"""
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
+ - scale attention heads per NormFormer paper
"""
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ # deepnet initialization
+ def deepnet_init(gain=1):
+ init = jax.nn.initializers.glorot_normal()
+
+ def _init(*args, **kwargs):
+ return gain * init(*args, **kwargs)
+
+ return _init
+
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FFN(nn.Module):
+ """Simple FFN layer"""
+
+ config: DalleBartConfig
+ ffn_dim: int
+ embed_dim: int
+ dtype: jnp.dtype = jnp.float32
+ is_encoder: bool = False
+
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ def norm(type, *args, **kwargs):
+ if type == "rmsnorm":
+ return RMSNorm(*args, **kwargs)
+ elif type == "layernorm":
+ return nn.LayerNorm(*args, **kwargs)
+ else:
+ raise ValueError(f"Unknown norm type {type}")
+
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ def _compute_rms_sq(self, x, axes):
+ x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x)))
+ rms_sq = jnp.mean(jax.lax.square(x), axes)
+ return rms_sq
+
===========changed ref 7===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartDecoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartEncoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 9===========
# module: dalle_mini.model.modeling
+ class GLU(nn.Module):
+ """From "GLU Variants Improve Transformer" by https://arxiv.org/abs/2002.05202"""
+
+ config: DalleBartConfig
+ ffn_dim: int
+ embed_dim: int
+ dtype: jnp.dtype = jnp.float32
+ is_encoder: bool = False
+
|
dalle_mini.data/Dataset.preprocess
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<8>:<add> self.train_dataset = self.train_dataset.shuffle(
<del> self.train_dataset = self.train_dataset.shuffle(5000, self.seed_dataset)
<9>:<add> buffer_size=5000, seed=self.seed_dataset
<add> )
<10>:<del> # prepare rng for later shuffling
<11>:<del> if self.seed_dataset is None:
<12>:<del> self.seed_dataset = np.random.get_state()[1][0]
|
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
<0> # get required config variables
<1> decoder_start_token_id = config.decoder_start_token_id
<2> normalize_text = config.normalize_text
<3> max_length = config.max_text_length
<4>
<5> if self.streaming:
<6> # we need to shuffle early in streaming mode
<7> if hasattr(self, "train_dataset"):
<8> self.train_dataset = self.train_dataset.shuffle(5000, self.seed_dataset)
<9> else:
<10> # prepare rng for later shuffling
<11> if self.seed_dataset is None:
<12> self.seed_dataset = np.random.get_state()[1][0]
<13> self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
<14>
<15> # filter data
<16> partial_filter_function = partial(
<17> filter_function,
<18> filter_column=self.filter_column,
<19> filter_value=self.filter_value,
<20> clip_score_column=self.clip_score_column,
<21> min_clip_score=self.min_clip_score,
<22> max_clip_score=self.max_clip_score,
<23> )
<24> for ds in ["train_dataset", "eval_dataset"]:
<25> if hasattr(self, ds):
<26> setattr(
<27> self,
<28> ds,
<29> (
<30> getattr(self, ds).filter(partial_filter_function)
<31> if self.streaming
<32> else getattr(self, ds).filter(
<33> partial_filter_function,
<34> num_proc=self.preprocessing_num_workers,
<35> load_from_cache_file=not self.overwrite_cache,
<36> desc="Filtering datasets",
<37> )
<38> ),
<39> )
<40>
<41> # normalize text
<42> if normalize_text:
<43> text_normalizer = TextNormalizer()
<44> partial_normalize_function =</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# offset: 1
normalize_function,
text_column=self.text_column,
text_normalizer=text_normalizer,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(partial_normalize_function)
if self.streaming
else getattr(self, ds).map(
partial_normalize_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Normalizing datasets",
)
),
)
# blank captions
if self.blank_caption_prob:
partial_blank_caption_function = partial(
blank_caption_function,
text_column=self.text_column,
blank_caption_prob=self.blank_caption_prob,
)
if hasattr(self, "train_dataset"):
self.train_dataset = (
self.train_dataset.map(partial_blank_caption_function)
if self.streaming
else self.train_dataset.map(
partial_blank_caption_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=False,
desc="Blanking some captions",
)
)
# preprocess
partial_preprocess_function = partial(
preprocess_function,
tokenizer=tokenizer,
text_column=self.text_column,
encoding_column=self.encoding_column,
max_length=max_length,
decoder_start_token_id=decoder_start_token_id,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map</s>
===========below chunk 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# offset: 2
<s> hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=[
self.text_column,
self.encoding_column,
],
)
if self.streaming
else getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
===========unchanged ref 0===========
at: dalle_mini.data
blank_caption_function(example, text_column, blank_caption_prob)
normalize_function(example, text_column, text_normalizer)
filter_function(example, min_clip_score, max_clip_score, clip_score_column, filter_column, filter_value)
preprocess_function(examples, tokenizer, text_column, encoding_column, max_length, decoder_start_token_id)
at: dalle_mini.data.Dataset
streaming: bool = True
text_column: str = "caption"
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
blank_caption_prob: float = 0.0
clip_score_column: str = "clip_score"
min_clip_score: float = None
max_clip_score: float = None
filter_column: str = None
filter_value: str = None
at: dalle_mini.data.Dataset.__post_init__
self.seed_dataset = random.randint(0, 2**32 - 1)
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
self.eval_dataset = dataset["validation"]
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
at: dalle_mini.data.Dataset.dataloader
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
===========unchanged ref 1===========
at: datasets.arrow_dataset.Dataset
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
wrapper(*, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
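The filter/map calls above all follow the same pattern; a minimal sketch with a dummy predicate (not the repository's filter_function, and the argument values are placeholders):
from functools import partial

def keep_long_captions(example, min_len):
    return len(example["caption"]) >= min_len

predicate = partial(keep_long_captions, min_len=5)
# streaming datasets:   dataset = dataset.filter(predicate)
# map-style datasets:   dataset = dataset.filter(predicate, num_proc=8, load_from_cache_file=False)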
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
+ if self.seed_dataset is None:
+ # create a random seed
+ self.seed_dataset = random.randint(0, 2**32 - 1)
self.multi_hosts = jax.process_count() > 1
# feed blank captions only in streaming mode for now
# otherwise dataset could be cached with same blanked captions
if self.blank_caption_prob:
assert (
self.streaming is True
), "blank_caption_prob can only be used in streaming mode"
# define data_files
if self.train_file is not None or self.validation_file is not None:
# accept braceexpand notation
for k in ["train_file", "validation_file"]:
f = getattr(self, k)
if isinstance(f, str):
setattr(self, k, list(braceexpand(f)))
# for list of files, split training data shards by host
if (
isinstance(self.train_file, list)
and self.multi_hosts
and self.shard_by_host
):
self.train_file = self.train_file[
jax.process_index() :: jax.process_count()
]
data_files = {
"train": self.train_file,
"validation": self.validation_file,
}
else:
data_files = None
# load dataset
dataset = load_dataset(
self.dataset_repo_or_path,
data_files=data_files,
streaming=self.streaming,
use_auth_token=self.use_auth_token,
)
if self.do_train:
if "train" not in dataset:
raise ValueError("Training requires a training dataset")
self.train_dataset = dataset["train"]
if self.max_train_samples is not None:
self.train_dataset = (
self.train_dataset.take(self.max_</s>
|
dalle_mini.model.partitions/_get_partition_rules
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<2>:<add> (("embed_positions", "embedding"), P("mp", None)),
<del> ((r"embed_positions", "embedding"), P("mp", None)),
<3>:<add> (("embed_tokens", "embedding"), P("mp", None)),
<del> ((r"embed_tokens", "embedding"), P("mp", None)),
<4>:<add> # attention
<del> # self-attention
<5>:<add> (("(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
<del> ((r"self_attn", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
<6>:<add> (("out_proj", "kernel"), P("mp", None)),
<del> ((r"self_attn", "out_proj", "kernel"), P("mp", None)),
<7>:<del> # enc-dec attention
<8>:<del> ((r"encoder_attn", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
<9>:<del> ((r"encoder_attn", "out_proj", "kernel"), P("mp", None)),
<11>:<add> (("Dense_0", "kernel"), P(None, "mp")),
<del> ((r"fc1", "kernel"), P(None, "mp")),
<12>:<add> (("GLU.*", "Dense_1", "kernel"), P(None, "mp")),
<add> (("GLU.*", "Dense_2", "kernel"), P("mp", None)),
<del> ((r"fc2", "kernel"), P("mp", None)),
<13>:<add> (("FFN.*", "Dense_1", "kernel"), P("mp", None)),
<14>:<del> ((r"layernorm_embedding", "(bias|scale)"), None),
<15>:<del> ((r"self_attn_layer_norm", "(bias|scale)"), None),
<16>:<del> ((r"encoder_attn_layer_norm", "(bias|scale)"), None),
<17>:<add> (("(bias|scale)",), None),
<del> ((r"final_layer_norm", "(bias|scale)"), None),
<18>:<add> (("lm_head", "kernel"),
|
# module: dalle_mini.model.partitions
def _get_partition_rules():
<0> return [
<1> # embeddings
<2> ((r"embed_positions", "embedding"), P("mp", None)),
<3> ((r"embed_tokens", "embedding"), P("mp", None)),
<4> # self-attention
<5> ((r"self_attn", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
<6> ((r"self_attn", "out_proj", "kernel"), P("mp", None)),
<7> # enc-dec attention
<8> ((r"encoder_attn", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
<9> ((r"encoder_attn", "out_proj", "kernel"), P("mp", None)),
<10> # FFN
<11> ((r"fc1", "kernel"), P(None, "mp")),
<12> ((r"fc2", "kernel"), P("mp", None)),
<13> # layer norms
<14> ((r"layernorm_embedding", "(bias|scale)"), None),
<15> ((r"self_attn_layer_norm", "(bias|scale)"), None),
<16> ((r"encoder_attn_layer_norm", "(bias|scale)"), None),
<17> ((r"final_layer_norm", "(bias|scale)"), None),
<18> ((r"lm_head", "kernel"), P(None, "mp")),
<19> ]
<20>
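As a hedged sketch of how such (regex tuple, PartitionSpec) rules can be applied to a flattened parameter path — the real matching lives in _replacement_rules, which is not shown here, so the window-matching semantics below are an assumption:
import re

def first_matching_spec(rules, param_path):
    # param_path is a tuple of key strings, e.g.
    # ("model", "encoder", "layers", "0", "self_attn", "q_proj", "kernel")
    for patterns, spec in rules:
        n = len(patterns)
        for start in range(len(param_path) - n + 1):
            window = param_path[start:start + n]
            if all(re.fullmatch(p, key) for p, key in zip(patterns, window)):
                return spec
    return None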
|
===========changed ref 0===========
# module: tools.train.train
- class MetricsLogger:
- def __init__(self, step):
- self.step = step
- self.time = time.perf_counter()
- self.state_dict = {}
-
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(nn.Module):
- class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
"""
Edits:
- no bias
+ - use custom FlaxBartAttention
- - uses custom FlaxBartAttention
"""
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
+ - scale attention heads per NormFormer paper
"""
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ # deepnet initialization
+ def deepnet_init(gain=1):
+ init = jax.nn.initializers.glorot_normal()
+
+ def _init(*args, **kwargs):
+ return gain * init(*args, **kwargs)
+
+ return _init
+
===========changed ref 4===========
# module: dalle_mini.model.modeling
+ class FFN(nn.Module):
+ """Simple FFN layer"""
+
+ config: DalleBartConfig
+ ffn_dim: int
+ embed_dim: int
+ dtype: jnp.dtype = jnp.float32
+ is_encoder: bool = False
+
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ def norm(type, *args, **kwargs):
+ if type == "rmsnorm":
+ return RMSNorm(*args, **kwargs)
+ elif type == "layernorm":
+ return nn.LayerNorm(*args, **kwargs)
+ else:
+ raise ValueError(f"Unknown norm type {type}")
+
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ def _compute_rms_sq(self, x, axes):
+ x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x)))
+ rms_sq = jnp.mean(jax.lax.square(x), axes)
+ return rms_sq
+
===========changed ref 7===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartDecoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartEncoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 9===========
# module: dalle_mini.model.modeling
+ class GLU(nn.Module):
+ """From "GLU Variants Improve Transformer" by https://arxiv.org/abs/2002.05202"""
+
+ config: DalleBartConfig
+ ffn_dim: int
+ embed_dim: int
+ dtype: jnp.dtype = jnp.float32
+ is_encoder: bool = False
+
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ """
+ From "Root Mean Square Layer Normalization" by https://arxiv.org/abs/1910.07467
+
+ Adapted from flax.linen.LayerNorm
+ """
+
+ epsilon: float = 1e-6
+ dtype: Any = jnp.float32
+ param_dtype: Any = jnp.float32
+ use_scale: bool = True
+ scale_init: Any = jax.nn.initializers.ones
+
===========changed ref 11===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ @nn.compact
+ def __call__(self, x):
+ reduction_axes = (-1,)
+ feature_axes = (-1,)
+
+ rms_sq = self._compute_rms_sq(x, reduction_axes)
+
+ return self._normalize(
+ self,
+ x,
+ rms_sq,
+ reduction_axes,
+ feature_axes,
+ self.dtype,
+ self.param_dtype,
+ self.epsilon,
+ self.use_scale,
+ self.scale_init,
+ )
+
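A compact sketch of the computation the RMSNorm module above performs (learned scale omitted):
import jax.numpy as jnp

def rms_norm(x, epsilon=1e-6):
    # normalize by the root mean square over the feature axis
    rms_sq = jnp.mean(jnp.square(x), axis=-1, keepdims=True)
    return x / jnp.sqrt(rms_sq + epsilon)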
===========changed ref 12===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartDecoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartDecoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.decoder_layers)
- ]
- self.layerdrop = self.config.decoder_layerdrop
-
===========changed ref 13===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
- def setup(self):
- layer_module = (
- nn.remat(FlaxBartEncoderLayer, concrete=True)
- if self.config.gradient_checkpointing
- else FlaxBartEncoderLayer
- )
- self.layers = [
- layer_module(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.encoder_layers)
- ]
- self.layerdrop = self.config.encoder_layerdrop
-
===========changed ref 14===========
# module: dalle_mini.model.utils
- def copy_blobs(source_path, dest_path):
- assert source_path.startswith("gs://")
- from google.cloud import storage
-
- bucket_path = Path(source_path[5:])
- bucket, dir_path = str(bucket_path).split("/", 1)
- client = storage.Client()
- bucket = client.bucket(bucket)
- blobs = client.list_blobs(bucket, prefix=f"{dir_path}/")
- for blob in blobs:
- dest_name = str(Path(dest_path) / Path(blob.name).name)
- blob.download_to_filename(dest_name)
-
===========changed ref 15===========
# module: dalle_mini.model.modeling
+ remat = nn_partitioning.remat
+
+ # deepnet gain
+ deepnet_gain = {
+ "encoder": {
+ "alpha": lambda config: 0.81
+ * (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
+ "beta": lambda config: 0.87
+ * (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
+ },
+ "decoder": {
+ "alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
+ "beta": lambda config: (12 * config.decoder_layers) ** -0.25,
+ },
+ }
|
dalle_mini.model.partitions/set_partitions
|
Modified
|
borisdayma~dalle-mini
|
542378c30e591b7d3e40e8bfbd716f7cc49fb6cd
|
feat: implement transformer variants (#144)
|
<6>:<add> print(f"Unmatched -> {k}")
<del> print(k)
|
# module: dalle_mini.model.partitions
def set_partitions(in_dict):
<0> rules = _get_partition_rules()
<1> replace = _replacement_rules(rules)
<2> initd = {k: _unmatched for k in flatten_dict(in_dict)}
<3> result = {k: replace(k, v) for k, v in initd.items()}
<4> for k, v in result.items():
<5> if v == _unmatched:
<6> print(k)
<7> assert _unmatched not in result.values(), "Incomplete partition spec."
<8> return freeze(unflatten_dict(result))
<9>
|
===========unchanged ref 0===========
at: dalle_mini.model.partitions
_unmatched = object()
_replacement_rules(rules)
at: dalle_mini.model.partitions.set_partitions
rules = _get_partition_rules()
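For reference, a tiny round-trip on a toy parameter tree showing the flatten_dict / unflatten_dict / freeze calls used by set_partitions:
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict

params = {"encoder": {"embed_tokens": {"embedding": 0}}}
flat = flatten_dict(params)            # {("encoder", "embed_tokens", "embedding"): 0}
nested = freeze(unflatten_dict(flat))  # back to the nested (frozen) structure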
===========changed ref 0===========
# module: dalle_mini.model.partitions
def _get_partition_rules():
return [
# embeddings
+ (("embed_positions", "embedding"), P("mp", None)),
- ((r"embed_positions", "embedding"), P("mp", None)),
+ (("embed_tokens", "embedding"), P("mp", None)),
- ((r"embed_tokens", "embedding"), P("mp", None)),
+ # attention
- # self-attention
+ (("(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
- ((r"self_attn", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
+ (("out_proj", "kernel"), P("mp", None)),
- ((r"self_attn", "out_proj", "kernel"), P("mp", None)),
- # enc-dec attention
- ((r"encoder_attn", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
- ((r"encoder_attn", "out_proj", "kernel"), P("mp", None)),
# FFN
+ (("Dense_0", "kernel"), P(None, "mp")),
- ((r"fc1", "kernel"), P(None, "mp")),
+ (("GLU.*", "Dense_1", "kernel"), P(None, "mp")),
+ (("GLU.*", "Dense_2", "kernel"), P("mp", None)),
- ((r"fc2", "kernel"), P("mp", None)),
+ (("FFN.*", "Dense_1", "kernel"), P("mp", None)),
# layer norms
- ((r"layernorm_embedding", "(bias|scale)"), None),
- ((r"self_attn_layer_norm", "(bias|scale)"), None),
- ((r</s>
===========changed ref 1===========
# module: dalle_mini.model.partitions
def _get_partition_rules():
# offset: 1
<s> None),
- ((r"self_attn_layer_norm", "(bias|scale)"), None),
- ((r"encoder_attn_layer_norm", "(bias|scale)"), None),
+ (("(bias|scale)",), None),
- ((r"final_layer_norm", "(bias|scale)"), None),
+ (("lm_head", "kernel"), P(None, "mp")),
- ((r"lm_head", "kernel"), P(None, "mp")),
+ # head scale and tau
+ (("(head_scale|tau)",), None),
]
===========changed ref 2===========
# module: tools.train.train
- class MetricsLogger:
- def __init__(self, step):
- self.step = step
- self.time = time.perf_counter()
- self.state_dict = {}
-
===========changed ref 3===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayer(nn.Module):
- class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
"""
Edits:
- no bias
+ - use custom FlaxBartAttention
- - uses custom FlaxBartAttention
"""
===========changed ref 4===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
+ - scale attention heads per NormFormer paper
"""
===========changed ref 5===========
# module: dalle_mini.model.modeling
+ # deepnet initialization
+ def deepnet_init(gain=1):
+ init = jax.nn.initializers.glorot_normal()
+
+ def _init(*args, **kwargs):
+ return gain * init(*args, **kwargs)
+
+ return _init
+
===========changed ref 6===========
# module: dalle_mini.model.modeling
+ class FFN(nn.Module):
+ """Simple FFN layer"""
+
+ config: DalleBartConfig
+ ffn_dim: int
+ embed_dim: int
+ dtype: jnp.dtype = jnp.float32
+ is_encoder: bool = False
+
===========changed ref 7===========
# module: dalle_mini.model.modeling
+ def norm(type, *args, **kwargs):
+ if type == "rmsnorm":
+ return RMSNorm(*args, **kwargs)
+ elif type == "layernorm":
+ return nn.LayerNorm(*args, **kwargs)
+ else:
+ raise ValueError(f"Unknown norm type {type}")
+
===========changed ref 8===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ def _compute_rms_sq(self, x, axes):
+ x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x)))
+ rms_sq = jnp.mean(jax.lax.square(x), axes)
+ return rms_sq
+
===========changed ref 9===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoderLayerCollection(nn.Module):
- class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartDecoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 10===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoderLayerCollection(nn.Module):
- class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
+ config: DalleBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- use custom FlaxBartEncoderLayer
- allow Gradient Checkpointing (nn.remat)
"""
===========changed ref 11===========
# module: dalle_mini.model.modeling
+ class GLU(nn.Module):
+ """From "GLU Variants Improve Transformer" by https://arxiv.org/abs/2002.05202"""
+
+ config: DalleBartConfig
+ ffn_dim: int
+ embed_dim: int
+ dtype: jnp.dtype = jnp.float32
+ is_encoder: bool = False
+
===========changed ref 12===========
# module: dalle_mini.model.modeling
+ class RMSNorm(nn.Module):
+ """
+ From "Root Mean Square Layer Normalization" by https://arxiv.org/abs/1910.07467
+
+ Adapted from flax.linen.LayerNorm
+ """
+
+ epsilon: float = 1e-6
+ dtype: Any = jnp.float32
+ param_dtype: Any = jnp.float32
+ use_scale: bool = True
+ scale_init: Any = jax.nn.initializers.ones
+
|
dalle_mini.model.text/HashtagProcessor.__init__
|
Modified
|
borisdayma~dalle-mini
|
3b8d8cb0d153f8ad1b8be3c6d6e09b0a40c64c8f
|
fix(textnormalizer): consider utf8 on windows (#148)
|
<4>:<add> l.split()[0]
<add> for l in Path(wiki_word_frequency).read_text(encoding="utf8").splitlines()
<del> l.split()[0] for l in Path(wiki_word_frequency).read_text().splitlines()
|
# module: dalle_mini.model.text
class HashtagProcessor:
# Adapted from wordninja library
# We use our wikipedia word count + a good heuristic to make it work
def __init__(self):
<0> wiki_word_frequency = hf_hub_download(
<1> "dalle-mini/dalle-mini", filename="enwiki-words-frequency.txt"
<2> )
<3> self._word_cost = (
<4> l.split()[0] for l in Path(wiki_word_frequency).read_text().splitlines()
<5> )
<6> self._word_cost = {
<7> str(k): math.log(float(i + 1)) for i, k in enumerate(self._word_cost)
<8> }
<9> self._max_word = max(len(x) for x in self._word_cost.keys())
<10> self._SPLIT_RE = re.compile("[^a-zA-Z0-9']+")
<11>
|
===========unchanged ref 0===========
at: huggingface_hub.file_download
hf_hub_download(repo_id: str, filename: str, *, subfolder: Optional[str]=None, repo_type: Optional[str]=None, revision: Optional[str]=None, library_name: Optional[str]=None, library_version: Optional[str]=None, cache_dir: Union[str, Path, None]=None, local_dir: Union[str, Path, None]=None, local_dir_use_symlinks: Union[bool, Literal["auto"]]="auto", user_agent: Union[Dict, str, None]=None, force_download: bool=False, force_filename: Optional[str]=None, proxies: Optional[Dict]=None, etag_timeout: float=DEFAULT_ETAG_TIMEOUT, resume_download: bool=False, token: Union[bool, str, None]=None, local_files_only: bool=False, legacy_cache_layout: bool=False, endpoint: Optional[str]=None) -> str
at: math
log(x: SupportsFloat, base: SupportsFloat=...) -> float
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
read_text(encoding: Optional[str]=..., errors: Optional[str]=...) -> str
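A rough sketch of the wordninja-style dynamic-programming split that these log-rank word costs enable (simplified, not the repository's splitter; unknown substrings get a large penalty):
def split_hashtag(s, word_cost, max_word):
    # best[i] = (total cost, length of the last word) for the prefix s[:i]
    best = [(0.0, 0)]
    for i in range(1, len(s) + 1):
        candidates = (
            (best[i - k][0] + word_cost.get(s[i - k:i], 9e9), k)
            for k in range(1, min(max_word, i) + 1)
        )
        best.append(min(candidates))
    words, i = [], len(s)
    while i > 0:
        _, k = best[i]
        words.append(s[i - k:i])
        i -= k
    return " ".join(reversed(words))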
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
472c4cc44dcbd88275440c2921f0eb0bae9d30a7
|
feat: add cogview
|
<13>:<add> "cogview",
|
<s>)
- ln_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
use_cosine_attention=False, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
<0> # text normalizer
<1> self.normalize_text = normalize_text
<2>
<3> # transformer variants
<4> self.head_scale = head_scale # per Normformer
<5> assert ln_type in [
<6> "rmsnorm",
<7> "layernorm",
<8> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<9> self.ln_type = ln_type
<10> assert ln_positions in [
<11> "normformer",
<12> "swinv2",
<13> "deepnet",
<14> ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
<15> self.ln_positions = ln_positions
<16> self.use_cosine_attention = use_cosine_attention
<17> self.tau_init = tau_init
<18> self.use_deepnet_scaling = use_deepnet_scaling
<19> self.use_glu = use_glu
<20>
<21> # common parameters
<22> self.encoder_vocab_size = encoder_vocab_size
<23> self.image_vocab_size = image_vocab_size
<24> self.image_length = image_length
<25> self.max_text_length = max_text_length
<26> self.d_model = d_model
<27> self.encoder_ffn_dim = encoder_ffn_dim
<28> self.encoder_layers = encoder_layers
<29> self.encoder_attention_heads = encoder_attention_heads
<30> self.decoder_ffn_dim = decoder_ffn_dim
<31> self.decoder_layers = decoder_layers</s>
|
===========below chunk 0===========
<s>_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
use_cosine_attention=False, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
# offset: 1
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
</s>
===========below chunk 1===========
<s>_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
use_cosine_attention=False, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
# offset: 2
<s>_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
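As a hedged illustration of what the ln_positions options control — simplified wiring of a single sublayer, where ln and sublayer stand for arbitrary callables and the DeepNet residual gain is omitted:
def sublayer_with_norms(x, sublayer, ln, ln_positions):
    residual = x
    if ln_positions in ("normformer", "cogview"):            # pre-norm on the sublayer input
        x = ln(x)
    x = sublayer(x)
    if ln_positions in ("normformer", "swinv2", "cogview"):  # norm on the sublayer output
        x = ln(x)
    x = residual + x
    if ln_positions == "deepnet":                            # post-norm after the residual add
        x = ln(x)
    return x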
|
dalle_mini.model.modeling/GLU.__call__
|
Modified
|
borisdayma~dalle-mini
|
472c4cc44dcbd88275440c2921f0eb0bae9d30a7
|
feat: add cogview
|
<4>:<add> if self.config.ln_positions in ["normformer", "cogview"]:
<del> if self.config.ln_positions in ["normformer"]:
|
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3>
<4> if self.config.ln_positions in ["normformer"]:
<5> x = norm(
<6> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<7> )(x)
<8> w = nn.Dense(
<9> self.ffn_dim,
<10> dtype=self.dtype,
<11> use_bias=False,
<12> kernel_init=deepnet_init(gain)
<13> if self.config.use_deepnet_scaling
<14> else jax.nn.initializers.normal(self.config.init_std),
<15> )(x)
<16> w = ACT2FN[self.config.activation_function](w)
<17> v = nn.Dense(
<18> self.ffn_dim,
<19> dtype=self.dtype,
<20> use_bias=False,
<21> kernel_init=deepnet_init(gain)
<22> if self.config.use_deepnet_scaling
<23> else jax.nn.initializers.normal(self.config.init_std),
<24> )(x)
<25> x = w * v
<26> if self.config.ln_positions in ["normformer"]:
<27> x = norm(
<28> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<29> )(x)
<30> x = nn.Dropout(rate=self.config.activation_dropout)(
<31> x, deterministic=deterministic
<32> )
<33>
<34> x = nn.Dense(
<35> self.embed_dim,
<36> dtype=self.dtype,
<37> use_bias=False,
<38> kernel_init=deep</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.GLU
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
<s>)
- ln_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
use_cosine_attention=False, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
self.head_scale = head_scale # per Normformer
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
assert ln_positions in [
"normformer",
"swinv2",
+ "cogview",
"deepnet",
], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
</s>
===========changed ref 1===========
<s>_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
use_cosine_attention=False, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
# offset: 1
<s>_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,</s>
===========changed ref 2===========
<s>_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
use_cosine_attention=False, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
# offset: 2
<s> forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
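A minimal sketch of the gated feed-forward computation this record's GLU module implements (GELU shown for the activation, but the actual one comes from config.activation_function; the norms and dropout around it are omitted):
import jax

def glu_ffn(x, w_in, v_in, w_out):
    # x: (..., d_model); w_in, v_in: (d_model, ffn_dim); w_out: (ffn_dim, d_model)
    return (jax.nn.gelu(x @ w_in) * (x @ v_in)) @ w_out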
|
dalle_mini.model.modeling/FFN.__call__
|
Modified
|
borisdayma~dalle-mini
|
472c4cc44dcbd88275440c2921f0eb0bae9d30a7
|
feat: add cogview
|
<3>:<add> if self.config.ln_positions in ["normformer", "cogview"]:
<del> if self.config.ln_positions in ["normformer"]:
<31>:<add> if self.config.ln_positions in ["swinv2", "cogview"]:
<del> if self.config.ln_positions in ["swinv2"]:
|
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3> if self.config.ln_positions in ["normformer"]:
<4> x = norm(
<5> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<6> )(x)
<7> x = nn.Dense(
<8> self.ffn_dim,
<9> dtype=self.dtype,
<10> use_bias=False,
<11> kernel_init=deepnet_init(gain)
<12> if self.config.use_deepnet_scaling
<13> else jax.nn.initializers.normal(self.config.init_std),
<14> )(x)
<15> x = ACT2FN[self.config.activation_function](x)
<16> if self.config.ln_positions in ["normformer"]:
<17> x = norm(
<18> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<19> )(x)
<20> x = nn.Dropout(rate=self.config.activation_dropout)(
<21> x, deterministic=deterministic
<22> )
<23> x = nn.Dense(
<24> self.embed_dim,
<25> dtype=self.dtype,
<26> use_bias=False,
<27> kernel_init=deepnet_init(gain)
<28> if self.config.use_deepnet_scaling
<29> else jax.nn.initializers.normal(self.config.init_std),
<30> )(x)
<31> if self.config.ln_positions in ["swinv2"]:
<32> x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
<33> x = nn.Dropout(</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FFN
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
+ if self.config.ln_positions in ["normformer", "cogview"]:
- if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deep</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s> use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
+ if self.config.ln_positions in ["swinv2", "cogview"]:
- if self.config.ln_positions in ["swinv2"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
<s>)
- ln_positions="deepnet", # layer normalization positions, "normformer", "swinv2", "deepnet" (same as post-ln)
use_cosine_attention=False, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
use_glu=False, # "GLU Variants Improve Transformer"
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
self.head_scale = head_scale # per Normformer
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
assert ln_positions in [
"normformer",
"swinv2",
+ "cogview",
"deepnet",
], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
</s>
|
dalle_mini.model.modeling/FlaxBartDecoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
472c4cc44dcbd88275440c2921f0eb0bae9d30a7
|
feat: add cogview
|
<10>:<add> if self.config.ln_positions in ["normformer", "cogview"]:
<del> if self.config.ln_positions in ["normformer"]:
<32>:<add> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<del> if self.config.ln_positions in ["normformer", "swinv2"]:
|
<s> FlaxBartDecoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["decoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8>
<9> # Self Attention
<10> if self.config.ln_positions in ["normformer"]:
<11> hidden_states = norm(
<12> self.config.ln_type,
<13> dtype=self.dtype,
<14> epsilon=1e-05,
<15> use_scale=False,
<16> )(hidden_states)
<17> hidden_states, attn_weights = FlaxBartAttention(
<18> config=self.config,
<19> embed_dim=embed_dim,
<20> num_heads=self.config.decoder_attention_heads,
<21> dropout=self.config.attention_dropout,
<22> causal=True,
<23> bias=False,
<24> dtype=self.dtype,
<25> is_encoder=False,
<26> )(
<27> hidden_states=hidden_states,
<28> attention_mask=attention_mask,
<29> init_cache=init_cache,
<30> )
<31>
<32> if self.config.ln_positions in ["normformer", "swinv2"]:
<33> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<34> hidden_states
<35> )
<36> hidden_states = nn.Dropout(rate=self.config.</s>
|
===========below chunk 0===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["deepnet"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
# Cross Attention
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
if self.config.ln_positions in ["normformer"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=False,
)(hidden_states)
hidden_states, cross_attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
bias=False,
dtype=self.dtype,
is_encoder=False,
)(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
if self.config.ln_positions in ["normformer", "swinv2"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
hidden_states = nn.Drop</s>
===========below chunk 1===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s>, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["deepnet"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
# Feed forward
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["deepnet"]:
use_scale = self.use_scale or self.config</s>
===========below chunk 2===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>_positions == "deepnet"
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights, cross_attn_weights)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartDecoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = False
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
+ if self.config.ln_positions in ["normformer", "cogview"]:
- if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
x = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = ACT2FN[self.config.activation_function](x)
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
+ if self.config.ln_positions in ["swinv2", "cogview"]:
- if self.config.ln_positions in ["swinv2"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-</s>
|
dalle_mini.model.modeling/FlaxBartEncoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
02824a7255785a8ca60568f8db2677a32109b65f
|
feat: remove unnecessary LN

|
<14>:<add> # not needed for other models which use layernorm before x-attention
<15>:<del> add_norm = (i == n_layers - 1) or (
<16>:<add> add_norm = self.config.ln_positions == "swinv2" and (
<del> (self.config.ln_positions == "swinv2") and ((i + 1) % 6 == 0)
<17>:<add> (i == n_layers - 1) or ((i + 1) % 6 == 0)
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> all_hidden_states = () if output_hidden_states else None
<1> all_self_attns = () if output_attentions else None
<2>
<3> n_layers = self.config.encoder_layers
<4> layer = (
<5> remat(FlaxBartEncoderLayer, static_argnums=(2, 3))
<6> if self.config.gradient_checkpointing
<7> else FlaxBartEncoderLayer
<8> )
<9> for i in range(n_layers):
<10> if output_hidden_states:
<11> all_hidden_states += (hidden_states,)
<12> # final layernorm on the output of the last layer
<13> # or every 6 layers for Swin v2
<14> # ignored args for deepnet which always add a norm with scale
<15> add_norm = (i == n_layers - 1) or (
<16> (self.config.ln_positions == "swinv2") and ((i + 1) % 6 == 0)
<17> )
<18> # we don't need to scale the norm for the last layer
<19> use_scale = i != n_layers - 1
<20> layer_outputs = layer(
<21> self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale
<22> )(
<23> hidden_states,
<24> attention_mask,
<25> output_attentions,
<26> deterministic,
<27> )
<28> hidden_states = layer_outputs[0]
<29> if output_attentions:
<30> all_self_attns += (layer_outputs[1],)
<31>
<32> # add hidden states from the last layer
<33> if output_hidden_states:
<34> all_hidden</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartEncoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutput(**kwargs: _VT)
FlaxBaseModelOutput(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
FlaxBaseModelOutput(map: Mapping[_KT, _VT], **kwargs: _VT)
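For context, a small sketch of the gradient-checkpointing wrapper used above, on a toy module; static_argnums marks non-array call arguments and, following the usage in this record, is counted after self.
import flax.linen as nn

class TinyLayer(nn.Module):
    @nn.compact
    def __call__(self, x, deterministic: bool = True):
        return nn.Dense(16)(x)

# Activations of the wrapped layer are recomputed in the backward pass
# instead of being stored, trading compute for memory.
CheckpointedTinyLayer = nn.remat(TinyLayer, static_argnums=(1,))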
|
dalle_mini.model.modeling/FlaxBartEncoder.setup
|
Modified
|
borisdayma~dalle-mini
|
503d6b48c94dca09b8ca01fbbd80a6156489167d
|
fix: DeepNet doesn't scale weights of embedding/output layers (#150)
|
<12>:<del> embedding_init=deepnet_init()
<13>:<del> if self.config.use_deepnet_scaling
<14>:<add> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<del> else jax.nn.initializers.normal(self.config.init_std),
|
# module: dalle_mini.model.modeling
class FlaxBartEncoder(FlaxBartEncoder):
def setup(self):
<0> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<1>
<2> embed_dim = self.config.d_model
<3> self.padding_idx = self.config.pad_token_id
<4> self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
<5>
<6> # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
<7> # and adjust num_embeddings appropriately. Other models don't have this hack
<8> self.offset = 0
<9> self.embed_positions = nn.Embed(
<10> self.config.max_text_length + self.offset,
<11> embed_dim,
<12> embedding_init=deepnet_init()
<13> if self.config.use_deepnet_scaling
<14> else jax.nn.initializers.normal(self.config.init_std),
<15> )
<16> self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
<17> self.layernorm_embedding = norm(
<18> self.config.ln_type, dtype=self.dtype, epsilon=1e-05
<19> )
<20>
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling
norm(type, *args, **kwargs)
at: math
sqrt(x: SupportsFloat, /) -> float
at: transformers.configuration_utils.PretrainedConfig.__init__
self.pad_token_id = kwargs.pop("pad_token_id", None)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.dropout = dropout
self.init_std = init_std
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
at: transformers.models.bart.modeling_flax_bart.FlaxBartEncoder
config: BartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
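
A standalone sketch of the learned position table this setup builds: plain BART reserves an
offset of 2 for its padding-token hack, while here the offset is 0, so the table has exactly
max_text_length rows (sizes below are illustrative, not the model's defaults).

import flax.linen as nn
import jax
import jax.numpy as jnp

class LearnedPositions(nn.Module):
    max_text_length: int = 64  # illustrative
    d_model: int = 1024        # illustrative
    offset: int = 0            # BART uses 2; this encoder uses 0

    @nn.compact
    def __call__(self, position_ids):
        table = nn.Embed(
            self.max_text_length + self.offset,
            self.d_model,
            embedding_init=jax.nn.initializers.normal(0.02),
        )
        return table(position_ids + self.offset)

pos = LearnedPositions()
params = pos.init(jax.random.PRNGKey(0), jnp.arange(8))
print(pos.apply(params, jnp.arange(8)).shape)  # (8, 1024)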
|
dalle_mini.model.modeling/FlaxBartDecoder.setup
|
Modified
|
borisdayma~dalle-mini
|
503d6b48c94dca09b8ca01fbbd80a6156489167d
|
fix: DeepNet doesn't scale weights of embedding/output layers (#150)
|
<14>:<del> embedding_init=deepnet_init()
<15>:<del> if self.config.use_deepnet_scaling
<16>:<add> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<del> else jax.nn.initializers.normal(self.config.init_std),
|
# module: dalle_mini.model.modeling
class FlaxBartDecoder(FlaxBartDecoder):
def setup(self):
<0> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<1>
<2> embed_dim = self.config.d_model
<3> self.padding_idx = self.config.pad_token_id
<4> self.embed_scale = (
<5> math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
<6> )
<7>
<8> # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
<9> # and adjust num_embeddings appropriately. Other models don't have this hack
<10> self.offset = 0
<11> self.embed_positions = nn.Embed(
<12> self.config.image_length + self.offset, # image length for BOS
<13> embed_dim,
<14> embedding_init=deepnet_init()
<15> if self.config.use_deepnet_scaling
<16> else jax.nn.initializers.normal(self.config.init_std),
<17> )
<18>
<19> self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
<20> self.layernorm_embedding = norm(
<21> self.config.ln_type, dtype=self.dtype, epsilon=1e-05
<22> )
<23>
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling
norm(type, *args, **kwargs)
at: math
sqrt(x: SupportsFloat, /) -> float
at: transformers.configuration_utils.PretrainedConfig.__init__
self.pad_token_id = kwargs.pop("pad_token_id", None)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.init_std = init_std
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
at: transformers.models.bart.modeling_flax_bart.FlaxBartDecoder
config: BartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoder(FlaxBartEncoder):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
self.config.max_text_length + self.offset,
embed_dim,
- embedding_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- else jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)
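
A quick worked example of the embed_scale rule above: with scale_embedding enabled, token
embeddings are multiplied by sqrt(d_model), e.g. 32.0 for an illustrative d_model of 1024,
and the factor collapses to 1.0 when the flag is off.

import math

def embed_scale(d_model: int, scale_embedding: bool) -> float:
    # same rule as the setup above
    return math.sqrt(d_model) if scale_embedding else 1.0

print(embed_scale(1024, True))   # 32.0
print(embed_scale(1024, False))  # 1.0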
|
dalle_mini.model.modeling/FlaxBartModule.setup
|
Modified
|
borisdayma~dalle-mini
|
503d6b48c94dca09b8ca01fbbd80a6156489167d
|
fix: DeepNet doesn't scale weights of embedding/output layers (#150)
|
<3>:<del> embedding_init=deepnet_init()
<4>:<del> if self.config.use_deepnet_scaling
<5>:<add> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<del> else jax.nn.initializers.normal(self.config.init_std),
<10>:<del> embedding_init=deepnet_init()
<11>:<del> if self.config.use_deepnet_scaling
<12>:<add> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<del> else jax.nn.initializers.normal(self.config.init_std),
|
# module: dalle_mini.model.modeling
class FlaxBartModule(FlaxBartModule):
def setup(self):
<0> encoder_embed_tokens = nn.Embed(
<1> self.config.encoder_vocab_size,
<2> self.config.d_model,
<3> embedding_init=deepnet_init()
<4> if self.config.use_deepnet_scaling
<5> else jax.nn.initializers.normal(self.config.init_std),
<6> )
<7> decoder_embed_tokens = nn.Embed(
<8> self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
<9> self.config.d_model,
<10> embedding_init=deepnet_init()
<11> if self.config.use_deepnet_scaling
<12> else jax.nn.initializers.normal(self.config.init_std),
<13> )
<14>
<15> self.encoder = FlaxBartEncoder(
<16> self.config, dtype=self.dtype, embed_tokens=encoder_embed_tokens
<17> )
<18> self.decoder = FlaxBartDecoder(
<19> self.config, dtype=self.dtype, embed_tokens=decoder_embed_tokens
<20> )
<21>
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling.FlaxBartModule.setup
encoder_embed_tokens = nn.Embed(
self.config.encoder_vocab_size,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart
FlaxBartPreTrainedModel(config: BartConfig, input_shape: Tuple[int]=(1, 1), seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs)
at: transformers.models.bart.modeling_flax_bart.FlaxBartModule
config: BartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartDecoder(FlaxBartDecoder):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.embed_scale = (
math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
)
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
self.config.image_length + self.offset, # image length for BOS
embed_dim,
- embedding_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- else jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartEncoder(FlaxBartEncoder):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
self.config.max_text_length + self.offset,
embed_dim,
- embedding_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- else jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)
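
A sketch of the two token tables wired above, with illustrative sizes: the encoder embeds
text tokens over encoder_vocab_size, while the decoder embeds image codes over
image_vocab_size + 1, the extra row being the BOS code.

import flax.linen as nn
import jax

encoder_vocab_size = 50264  # illustrative
image_vocab_size = 16384    # illustrative
d_model = 1024              # illustrative

encoder_embed_tokens = nn.Embed(
    encoder_vocab_size,
    d_model,
    embedding_init=jax.nn.initializers.normal(0.02),
)
decoder_embed_tokens = nn.Embed(
    image_vocab_size + 1,  # + 1 row for BOS
    d_model,
    embedding_init=jax.nn.initializers.normal(0.02),
)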
|
dalle_mini.model.modeling/FlaxBartForConditionalGenerationModule.setup
|
Modified
|
borisdayma~dalle-mini
|
503d6b48c94dca09b8ca01fbbd80a6156489167d
|
fix: DeepNet doesn't scale weights of embedding/output layers (#150)
|
<6>:<del> kernel_init=deepnet_init()
<7>:<del> if self.config.use_deepnet_scaling
<8>:<add> kernel_init=jax.nn.initializers.normal(self.config.init_std),
<del> else jax.nn.initializers.normal(self.config.init_std),
|
# module: dalle_mini.model.modeling
class FlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule):
def setup(self):
<0> self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
<1> self.lm_head = nn.Dense(
<2> self.config.image_vocab_size
<3> + 1, # image vocab size + 1 for BOS to have same size as decoder inputs (for sharding)
<4> use_bias=False,
<5> dtype=self.dtype,
<6> kernel_init=deepnet_init()
<7> if self.config.use_deepnet_scaling
<8> else jax.nn.initializers.normal(self.config.init_std),
<9> )
<10>
|
===========unchanged ref 0===========
at: transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule
config: BartConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
__call__(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, position_ids, decoder_position_ids, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartModule(FlaxBartModule):
def setup(self):
encoder_embed_tokens = nn.Embed(
self.config.encoder_vocab_size,
self.config.d_model,
- embedding_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- else jax.nn.initializers.normal(self.config.init_std),
)
decoder_embed_tokens = nn.Embed(
self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
self.config.d_model,
- embedding_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- else jax.nn.initializers.normal(self.config.init_std),
)
self.encoder = FlaxBartEncoder(
self.config, dtype=self.dtype, embed_tokens=encoder_embed_tokens
)
self.decoder = FlaxBartDecoder(
self.config, dtype=self.dtype, embed_tokens=decoder_embed_tokens
)
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartDecoder(FlaxBartDecoder):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.embed_scale = (
math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
)
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
self.config.image_length + self.offset, # image length for BOS
embed_dim,
- embedding_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- else jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FlaxBartEncoder(FlaxBartEncoder):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
self.embed_positions = nn.Embed(
self.config.max_text_length + self.offset,
embed_dim,
- embedding_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- else jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)
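
To make the "+ 1 for BOS" concrete: the configuration appends the special ids after the image
codes, so BOS/PAD/EOS/decoder_start all default to image_vocab_size and the lm_head predicts
image_vocab_size + 1 classes. A small check with an illustrative vocabulary size:

image_vocab_size = 16384         # illustrative
bos_token_id = image_vocab_size  # codes are 0..16383, BOS is appended as 16384
lm_head_classes = image_vocab_size + 1
assert lm_head_classes == bos_token_id + 1 == 16385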
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
5bd4c2020842d23c8feb4a0acce0c35a43f13cf5
|
feat: allow more configurations
|
<16>:<add> if ln_positions == "deepnet":
<add> ln_positions = "postln"
<21>:<add> self.use_all_scale = use_all_scale
|
<s>, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
+ use_glu=True, # "GLU Variants Improve Transformer"
- use_glu=False, # "GLU Variants Improve Transformer"
+ use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
<0> # text normalizer
<1> self.normalize_text = normalize_text
<2>
<3> # transformer variants
<4> self.head_scale = head_scale # per Normformer
<5> assert ln_type in [
<6> "rmsnorm",
<7> "layernorm",
<8> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<9> self.ln_type = ln_type
<10> assert ln_positions in [
<11> "normformer",
<12> "swinv2",
<13> "cogview",
<14> "deepnet",
<15> ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
<16> self.ln_positions = ln_positions
<17> self.use_cosine_attention = use_cosine_attention
<18> self.tau_init = tau_init
<19> self.use_deepnet_scaling = use_deepnet_scaling
<20> self.use_glu = use_glu
<21>
<22> # common parameters
<23> self.encoder_vocab_size = encoder_vocab_size
<24> self.image_vocab_size = image_vocab_size
<25> self.image_length = image_length
<26> self.max_text_length = max_text_length
<27> self.d_model = d_model
<28> self.encoder_ffn_dim = encoder_ffn_dim
<29> self.encoder_layers = encoder_layers
<30> self.encoder_attention_heads = encoder_attention_heads
<31> self.decoder_ffn_dim = decoder_ffn_dim
<32> self</s>
|
===========below chunk 0===========
<s> Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
+ use_glu=True, # "GLU Variants Improve Transformer"
- use_glu=False, # "GLU Variants Improve Transformer"
+ use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# offset: 1
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
</s>
===========below chunk 1===========
<s> Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
+ use_glu=True, # "GLU Variants Improve Transformer"
- use_glu=False, # "GLU Variants Improve Transformer"
+ use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# offset: 2
<s>=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
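
A hedged usage sketch for the options introduced here; the keyword names come from the
signature above, and the architecture arguments are spelled out because their defaults are
not shown (all sizes below are illustrative).

from dalle_mini.model.configuration import DalleBartConfig

config = DalleBartConfig(
    encoder_vocab_size=50264,      # illustrative
    image_vocab_size=16384,        # illustrative
    image_length=256,
    max_text_length=64,
    d_model=1024,
    encoder_ffn_dim=4096, encoder_layers=12, encoder_attention_heads=16,
    decoder_ffn_dim=4096, decoder_layers=12, decoder_attention_heads=16,
    ln_type="layernorm",           # or "rmsnorm"
    ln_positions="normformer",     # "swinv2", "cogview", or "deepnet" (stored as "postln")
    use_glu=True,                  # GLU feed-forward blocks
    use_cosine_attention=False,    # Swin v2 style attention
    use_deepnet_scaling=False,
)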
|
dalle_mini.model.modeling/GLU.__call__
|
Modified
|
borisdayma~dalle-mini
|
5bd4c2020842d23c8feb4a0acce0c35a43f13cf5
|
feat: allow more configurations
|
<6>:<add> self.config.ln_type,
<add> dtype=self.dtype,
<add> epsilon=1e-05,
<add> use_scale=self.config.use_all_scale,
<del> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<28>:<add> self.config.ln_type,
<add> dtype=self.dtype,
<add> epsilon=1e-05,
<add> use_scale=self.config.use_all_scale,
<del> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
|
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3>
<4> if self.config.ln_positions in ["normformer", "cogview"]:
<5> x = norm(
<6> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<7> )(x)
<8> w = nn.Dense(
<9> self.ffn_dim,
<10> dtype=self.dtype,
<11> use_bias=False,
<12> kernel_init=deepnet_init(gain)
<13> if self.config.use_deepnet_scaling
<14> else jax.nn.initializers.normal(self.config.init_std),
<15> )(x)
<16> w = ACT2FN[self.config.activation_function](w)
<17> v = nn.Dense(
<18> self.ffn_dim,
<19> dtype=self.dtype,
<20> use_bias=False,
<21> kernel_init=deepnet_init(gain)
<22> if self.config.use_deepnet_scaling
<23> else jax.nn.initializers.normal(self.config.init_std),
<24> )(x)
<25> x = w * v
<26> if self.config.ln_positions in ["normformer"]:
<27> x = norm(
<28> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<29> )(x)
<30> x = nn.Dropout(rate=self.config.activation_dropout)(
<31> x, deterministic=deterministic
<32> )
<33>
<34> x = nn.Dense(
<35> self.embed_dim,
<36> dtype=self.dtype,
<37> use_bias=False,
<38> </s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.GLU
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
<s>, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
+ use_glu=True, # "GLU Variants Improve Transformer"
- use_glu=False, # "GLU Variants Improve Transformer"
+ use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
self.head_scale = head_scale # per Normformer
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
"deepnet",
], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
+ if ln_positions == "deepnet":
+ ln_positions = "postln"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
+ self.use_all_scale = use_all_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ff</s>
===========changed ref 1===========
<s> Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
+ use_glu=True, # "GLU Variants Improve Transformer"
- use_glu=False, # "GLU Variants Improve Transformer"
+ use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# offset: 1
<s>layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(</s>
===========changed ref 2===========
<s> Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
+ use_glu=True, # "GLU Variants Improve Transformer"
- use_glu=False, # "GLU Variants Improve Transformer"
+ use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# offset: 2
<s> # args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
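
A self-contained sketch of the GLU feed-forward variant used above ("GLU Variants Improve
Transformer"), stripped of the norm, dropout and DeepNet options for clarity; widths are
illustrative.

import flax.linen as nn
import jax
import jax.numpy as jnp

class SimpleGLU(nn.Module):
    ffn_dim: int = 2730    # illustrative width
    embed_dim: int = 1024  # illustrative width

    @nn.compact
    def __call__(self, x):
        w = nn.gelu(nn.Dense(self.ffn_dim, use_bias=False)(x))  # gated branch
        v = nn.Dense(self.ffn_dim, use_bias=False)(x)           # linear branch
        return nn.Dense(self.embed_dim, use_bias=False)(w * v)  # project back

glu = SimpleGLU()
x = jnp.ones((2, 16, 1024))
params = glu.init(jax.random.PRNGKey(0), x)
print(glu.apply(params, x).shape)  # (2, 16, 1024)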
|
dalle_mini.model.modeling/FFN.__call__
|
Modified
|
borisdayma~dalle-mini
|
5bd4c2020842d23c8feb4a0acce0c35a43f13cf5
|
feat: allow more configurations
|
<5>:<add> self.config.ln_type,
<add> dtype=self.dtype,
<add> epsilon=1e-05,
<add> use_scale=self.config.use_all_scale,
<del> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<18>:<add> self.config.ln_type,
<add> dtype=self.dtype,
<add> epsilon=1e-05,
<add> use_scale=self.config.use_all_scale,
<del> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
|
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3> if self.config.ln_positions in ["normformer", "cogview"]:
<4> x = norm(
<5> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<6> )(x)
<7> x = nn.Dense(
<8> self.ffn_dim,
<9> dtype=self.dtype,
<10> use_bias=False,
<11> kernel_init=deepnet_init(gain)
<12> if self.config.use_deepnet_scaling
<13> else jax.nn.initializers.normal(self.config.init_std),
<14> )(x)
<15> x = ACT2FN[self.config.activation_function](x)
<16> if self.config.ln_positions in ["normformer"]:
<17> x = norm(
<18> self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
<19> )(x)
<20> x = nn.Dropout(rate=self.config.activation_dropout)(
<21> x, deterministic=deterministic
<22> )
<23> x = nn.Dense(
<24> self.embed_dim,
<25> dtype=self.dtype,
<26> use_bias=False,
<27> kernel_init=deepnet_init(gain)
<28> if self.config.use_deepnet_scaling
<29> else jax.nn.initializers.normal(self.config.init_std),
<30> )(x)
<31> if self.config.ln_positions in ["swinv2", "cogview"]:
<32> x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FFN
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
+ self.config.ln_type,
+ dtype=self.dtype,
+ epsilon=1e-05,
+ use_scale=self.config.use_all_scale,
- self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
+ self.config.ln_type,
+ dtype=self.dtype,
+ epsilon=1e-05,
+ use_scale=self.config.use_all_scale,
- self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
x = nn.Dropout(rate</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>dtype, epsilon=1e-05, use_scale=False
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
<s>, # used in Swin v2
tau_init=0.05, # used only in cosine attention (Swin v2)
use_deepnet_scaling=False, # used in Deepnet
+ use_glu=True, # "GLU Variants Improve Transformer"
- use_glu=False, # "GLU Variants Improve Transformer"
+ use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
self.head_scale = head_scale # per Normformer
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
"deepnet",
], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
+ if ln_positions == "deepnet":
+ ln_positions = "postln"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
+ self.use_all_scale = use_all_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ff</s>
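
Both FFN and GLU route their normalizations through the small norm(type, ...) helper selected
by ln_type. A minimal sketch of such a dispatcher with a hand-rolled RMSNorm; the repository's
actual helper may differ in details.

import flax.linen as nn
import jax
import jax.numpy as jnp

class SimpleRMSNorm(nn.Module):
    epsilon: float = 1e-05
    use_scale: bool = True
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, x):
        # RMSNorm rescales by the root mean square, with no mean subtraction
        var = jnp.mean(jnp.square(x), axis=-1, keepdims=True)
        y = x * jax.lax.rsqrt(var + self.epsilon)
        if self.use_scale:
            scale = self.param("scale", nn.initializers.ones, (x.shape[-1],))
            y = y * scale
        return y.astype(self.dtype)

def norm(type, *args, **kwargs):
    if type == "rmsnorm":
        return SimpleRMSNorm(*args, **kwargs)
    return nn.LayerNorm(*args, **kwargs)  # "layernorm"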
|
dalle_mini.model.modeling/FlaxBartEncoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
5bd4c2020842d23c8feb4a0acce0c35a43f13cf5
|
feat: allow more configurations
|
<8>:<add> if self.config.ln_positions in ["normformer", "cogview"]:
<del> if self.config.ln_positions in ["normformer"]:
<9>:<add> hidden_states = norm(
<add> self.config.ln_type,
<add> dtype=self.dtype,
<add> epsilon=1e-05,
<add> use_scale=self.config.use_all_scale,
<del> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<10>:<add> )(hidden_states)
<del> hidden_states
<11>:<del> )
<22>:<add> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<del> if self.config.ln_positions in ["normformer", "swinv2"]:
<30>:<add> if self.config.ln_positions in ["postln"]:
<del> if self.config.ln_positions in ["deepnet"]:
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["encoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8> if self.config.ln_positions in ["normformer"]:
<9> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<10> hidden_states
<11> )
<12> hidden_states, attn_weights = FlaxBartAttention(
<13> config=self.config,
<14> embed_dim=embed_dim,
<15> num_heads=self.config.encoder_attention_heads,
<16> dropout=self.config.attention_dropout,
<17> bias=False,
<18> dtype=self.dtype,
<19> is_encoder=True,
<20> )(hidden_states=hidden_states, attention_mask=attention_mask)
<21>
<22> if self.config.ln_positions in ["normformer", "swinv2"]:
<23> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<24> hidden_states
<25> )
<26> hidden_states = nn.Dropout(rate=self.config.dropout)(
<27> hidden_states, deterministic=deterministic
<28> )
<29> hidden_states = residual * res_gain + hidden_states
<30> if self.config.ln_positions in ["deepnet"]:
<31> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<32> hidden_states
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["deepnet"]:
use_scale = self.use_scale or self.config.ln_positions == "deepnet"
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartEncoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = True
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
+ self.config.ln_type,
+ dtype=self.dtype,
+ epsilon=1e-05,
+ use_scale=self.config.use_all_scale,
- self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
x = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = ACT2FN[self.config.activation_function](x)
if self.config.ln_positions in ["normformer"]:
x = norm(
+ self.config.ln_type,
+ dtype=self.dtype,
+ epsilon=1e-05,
+ use_scale=self.config.use_all_scale,
- self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
+ self.config.ln_type,
+ dtype=self.dtype,
+ epsilon=1e-05,
+ use_scale=self.config.use_all_scale,
- self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
+ self.config.ln_type,
+ dtype=self.dtype,
+ epsilon=1e-05,
+ use_scale=self.config.use_all_scale,
- self.config.ln_type, dtype=self.dtype, epsilon=1e-05, use_scale=False
)(x)
x = nn.Dropout(rate</s>
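
A compact, runnable sketch of where the norms sit in this encoder block for each ln_positions
scheme; norm, attn and ffn are passed in as plain callables, so this shows placement only,
not the real modules (the extra add_norm at selected layers is omitted).

def encoder_block(x, attn, ffn, norm, ln_positions, res_gain=1.0):
    # self-attention sub-block
    residual = x
    if ln_positions in ("normformer", "cogview"):
        x = norm(x)                       # norm before attention
    x = attn(x)
    if ln_positions in ("normformer", "swinv2", "cogview"):
        x = norm(x)                       # norm before the residual add
    x = residual * res_gain + x
    if ln_positions == "postln":
        x = norm(x)                       # classic post-LN

    # feed-forward sub-block (its inner pre/post norms live inside GLU/FFN above)
    residual = x
    x = ffn(x)
    x = residual * res_gain + x
    if ln_positions == "postln":
        x = norm(x)
    return x

# identity stand-ins, just to show the flow runs
print(encoder_block(1.0, lambda v: v, lambda v: v, lambda v: v, "normformer"))  # 4.0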
|
dalle_mini.model.modeling/FlaxBartDecoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
5bd4c2020842d23c8feb4a0acce0c35a43f13cf5
|
feat: allow more configurations
|
<15>:<add> use_scale=self.config.use_all_scale,
<del> use_scale=False,
|
<s> FlaxBartDecoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["decoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8>
<9> # Self Attention
<10> if self.config.ln_positions in ["normformer", "cogview"]:
<11> hidden_states = norm(
<12> self.config.ln_type,
<13> dtype=self.dtype,
<14> epsilon=1e-05,
<15> use_scale=False,
<16> )(hidden_states)
<17> hidden_states, attn_weights = FlaxBartAttention(
<18> config=self.config,
<19> embed_dim=embed_dim,
<20> num_heads=self.config.decoder_attention_heads,
<21> dropout=self.config.attention_dropout,
<22> causal=True,
<23> bias=False,
<24> dtype=self.dtype,
<25> is_encoder=False,
<26> )(
<27> hidden_states=hidden_states,
<28> attention_mask=attention_mask,
<29> init_cache=init_cache,
<30> )
<31>
<32> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<33> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<34> hidden_states
<35> )
<36> hidden_states = nn</s>
|
===========below chunk 0===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["deepnet"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
# Cross Attention
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=False,
)(hidden_states)
hidden_states, cross_attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
bias=False,
dtype=self.dtype,
is_encoder=False,
)(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states</s>
===========below chunk 1===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s>(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["deepnet"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
# Feed forward
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["deepnet"]:
use_scale</s>
===========below chunk 2===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>.use_scale or self.config.ln_positions == "deepnet"
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights, cross_attn_weights)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartDecoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = False
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
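
The DeepNet constants used for the residual adds and initializer gains come from the
deepnet_gain table shown above; a small sketch that simply evaluates them, assuming
illustrative depths of 12 encoder and 12 decoder layers.

encoder_layers, decoder_layers = 12, 12  # illustrative depths

# same formulas as the deepnet_gain table referenced above
enc_alpha = 0.81 * (encoder_layers**4 * decoder_layers) ** 0.0625
enc_beta = 0.87 * (encoder_layers**4 * decoder_layers) ** -0.0625
dec_alpha = (3 * decoder_layers) ** 0.25
dec_beta = (12 * decoder_layers) ** -0.25

def residual_add(residual, sublayer_out, alpha):
    # alpha scales the residual branch; beta is used to scale weight initialization
    return residual * alpha + sublayer_out

print(round(enc_alpha, 2), round(dec_alpha, 2))  # ~1.76 and ~2.45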
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
32f4ba551a0808722b7a8da5eb8dd569fa1022ca
|
feat: force final ln in encoder
|
<23>:<add> self.force_ln_scale = force_ln_scale
<del> self.use_all_scale = use_all_scale
<24>:<add> self.force_final_ln_encoder = force_final_ln_encoder
|
<s> Improve Transformer"
- use_glu=True, # "GLU Variants Improve Transformer"
+ # parameters that should not be necessary but could affect results
+ force_ln_scale=True, # force scale in layernorm even when followed by dense layers
+ force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
- use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
<0> # text normalizer
<1> self.normalize_text = normalize_text
<2>
<3> # transformer variants
<4> self.head_scale = head_scale # per Normformer
<5> assert ln_type in [
<6> "rmsnorm",
<7> "layernorm",
<8> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<9> self.ln_type = ln_type
<10> assert ln_positions in [
<11> "normformer",
<12> "swinv2",
<13> "cogview",
<14> "deepnet",
<15> ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
<16> if ln_positions == "deepnet":
<17> ln_positions = "postln"
<18> self.ln_positions = ln_positions
<19> self.use_cosine_attention = use_cosine_attention
<20> self.tau_init = tau_init
<21> self.use_deepnet_scaling = use_deepnet_scaling
<22> self.use_glu = use_glu
<23> self.use_all_scale = use_all_scale
<24>
<25> # common parameters
<26> self.encoder_vocab_size = encoder_vocab_size
<27> self.image_vocab_size = image_vocab_size
<28> self.image_length = image_length
<29> self.max_text_length = max_text_length
<30> self.d_model = d_model
<31> self.encoder_ffn_dim = encoder_ffn_dim
<32> self.encoder_layers = encoder_</s>
|
===========below chunk 0===========
<s>
- use_glu=True, # "GLU Variants Improve Transformer"
+ # parameters that should not be necessary but could affect results
+ force_ln_scale=True, # force scale in layernorm even when followed by dense layers
+ force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
- use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# offset: 1
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_</s>
===========below chunk 1===========
<s>
- use_glu=True, # "GLU Variants Improve Transformer"
+ # parameters that should not be necessary but could affect results
+ force_ln_scale=True, # force scale in layernorm even when followed by dense layers
+ force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
- use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# offset: 2
<s> in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
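
use_cosine_attention and tau_init refer to Swin v2 style attention, where logits are cosine
similarities divided by a learned temperature rather than scaled dot products. A hedged
sketch of the score computation only; the clamp value and parameterization are assumptions,
not necessarily the repo's exact choice.

import jax.numpy as jnp

def cosine_attention_scores(q, k, tau, min_tau=0.01):
    # q, k: [..., seq, head_dim]; tau starts at config.tau_init and is learned
    q = q / (jnp.linalg.norm(q, axis=-1, keepdims=True) + 1e-8)
    k = k / (jnp.linalg.norm(k, axis=-1, keepdims=True) + 1e-8)
    scores = jnp.einsum("...qd,...kd->...qk", q, k)  # cosine similarity in [-1, 1]
    return scores / jnp.maximum(tau, min_tau)

q = jnp.ones((2, 4, 8))
k = jnp.ones((2, 4, 8))
print(cosine_attention_scores(q, k, tau=0.05).shape)  # (2, 4, 4)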
|
dalle_mini.model.modeling/GLU.__call__
|
Modified
|
borisdayma~dalle-mini
|
32f4ba551a0808722b7a8da5eb8dd569fa1022ca
|
feat: force final ln in encoder
|
<9>:<add> use_scale=self.config.force_ln_scale,
<del> use_scale=self.config.use_all_scale,
<34>:<add> use_scale=self.config.force_ln_scale,
<del> use_scale=self.config.use_all_scale,
|
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3>
<4> if self.config.ln_positions in ["normformer", "cogview"]:
<5> x = norm(
<6> self.config.ln_type,
<7> dtype=self.dtype,
<8> epsilon=1e-05,
<9> use_scale=self.config.use_all_scale,
<10> )(x)
<11> w = nn.Dense(
<12> self.ffn_dim,
<13> dtype=self.dtype,
<14> use_bias=False,
<15> kernel_init=deepnet_init(gain)
<16> if self.config.use_deepnet_scaling
<17> else jax.nn.initializers.normal(self.config.init_std),
<18> )(x)
<19> w = ACT2FN[self.config.activation_function](w)
<20> v = nn.Dense(
<21> self.ffn_dim,
<22> dtype=self.dtype,
<23> use_bias=False,
<24> kernel_init=deepnet_init(gain)
<25> if self.config.use_deepnet_scaling
<26> else jax.nn.initializers.normal(self.config.init_std),
<27> )(x)
<28> x = w * v
<29> if self.config.ln_positions in ["normformer"]:
<30> x = norm(
<31> self.config.ln_type,
<32> dtype=self.dtype,
<33> epsilon=1e-05,
<34> use_scale=self.config.use_all_scale,
<35> )(x)
<36> x = nn.Dropout(rate=self.config.activation_dropout)(
<37> x, deterministic=deterministic
<38> )
<39>
<40> x</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.GLU
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
<s> Improve Transformer"
- use_glu=True, # "GLU Variants Improve Transformer"
+ # parameters that should not be necessary but could affect results
+ force_ln_scale=True, # force scale in layernorm even when followed by dense layers
+ force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
- use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
self.head_scale = head_scale # per Normformer
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
"deepnet",
], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
if ln_positions == "deepnet":
ln_positions = "postln"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
+ self.force_ln_scale = force_ln_scale
- self.use_all_scale = use_all_scale
+ self.force_final_ln_encoder = force_final_ln_encoder
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_</s>
===========changed ref 1===========
<s>
- use_glu=True, # "GLU Variants Improve Transformer"
+ # parameters that should not be necessary but could affect results
+ force_ln_scale=True, # force scale in layernorm even when followed by dense layers
+ force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
- use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# offset: 1
<s>d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image</s>
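
head_scale is the NormFormer trick of giving each attention head a learned output gain. A
minimal sketch, assuming the attention output is laid out as [batch, seq, heads, head_dim];
the repo's actual shape handling may differ.

import flax.linen as nn
import jax
import jax.numpy as jnp

class HeadScale(nn.Module):
    num_heads: int = 16  # illustrative

    @nn.compact
    def __call__(self, attn_out):
        # attn_out: [batch, seq, num_heads, head_dim]; one learned gain per head
        gain = self.param("head_scale", nn.initializers.ones, (1, 1, self.num_heads, 1))
        return attn_out * gain

mod = HeadScale()
x = jnp.ones((2, 4, 16, 64))
params = mod.init(jax.random.PRNGKey(0), x)
print(mod.apply(params, x).shape)  # (2, 4, 16, 64)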
|
dalle_mini.model.modeling/FFN.__call__
|
Modified
|
borisdayma~dalle-mini
|
32f4ba551a0808722b7a8da5eb8dd569fa1022ca
|
feat: force final ln in encoder
|
<8>:<add> use_scale=self.config.force_ln_scale,
<del> use_scale=self.config.use_all_scale,
<24>:<add> use_scale=self.config.force_ln_scale,
<del> use_scale=self.config.use_all_scale,
|
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3> if self.config.ln_positions in ["normformer", "cogview"]:
<4> x = norm(
<5> self.config.ln_type,
<6> dtype=self.dtype,
<7> epsilon=1e-05,
<8> use_scale=self.config.use_all_scale,
<9> )(x)
<10> x = nn.Dense(
<11> self.ffn_dim,
<12> dtype=self.dtype,
<13> use_bias=False,
<14> kernel_init=deepnet_init(gain)
<15> if self.config.use_deepnet_scaling
<16> else jax.nn.initializers.normal(self.config.init_std),
<17> )(x)
<18> x = ACT2FN[self.config.activation_function](x)
<19> if self.config.ln_positions in ["normformer"]:
<20> x = norm(
<21> self.config.ln_type,
<22> dtype=self.dtype,
<23> epsilon=1e-05,
<24> use_scale=self.config.use_all_scale,
<25> )(x)
<26> x = nn.Dropout(rate=self.config.activation_dropout)(
<27> x, deterministic=deterministic
<28> )
<29> x = nn.Dense(
<30> self.embed_dim,
<31> dtype=self.dtype,
<32> use_bias=False,
<33> kernel_init=deepnet_init(gain)
<34> if self.config.use_deepnet_scaling
<35> else jax.nn.initializers.normal(self.config.init_std),
<36> )(x)
<37> if self.config.ln_positions in ["swinv2",</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FFN
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
<s> Improve Transformer"
- use_glu=True, # "GLU Variants Improve Transformer"
+ # parameters that should not be necessary but could affect results
+ force_ln_scale=True, # force scale in layernorm even when followed by dense layers
+ force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
- use_all_scale=True, # use scale in layernorm even when seemingly unnecessary
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
self.head_scale = head_scale # per Normformer
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
"deepnet",
], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
if ln_positions == "deepnet":
ln_positions = "postln"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
+ self.force_ln_scale = force_ln_scale
- self.use_all_scale = use_all_scale
+ self.force_final_ln_encoder = force_final_ln_encoder
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_</s>
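Note on the `use_glu` flag referenced in the config diff above: the GLU variant from "GLU Variants Improve Transformer" gates one up-projection with another before the down-projection. A minimal standalone Flax sketch for illustration only (hypothetical MiniGLU; the repo's GLU above additionally handles DeepNet init, dropout and the configurable layer-norm positions):

# Minimal GLU feed-forward sketch (illustration only, not the repo's implementation).
import flax.linen as nn

class MiniGLU(nn.Module):
    ffn_dim: int
    embed_dim: int

    @nn.compact
    def __call__(self, x):
        w = nn.gelu(nn.Dense(self.ffn_dim, use_bias=False)(x))  # gate branch (gelu chosen for illustration)
        v = nn.Dense(self.ffn_dim, use_bias=False)(x)            # value branch
        return nn.Dense(self.embed_dim, use_bias=False)(w * v)   # down-projection of the gated product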
|
dalle_mini.model.modeling/FlaxBartEncoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
32f4ba551a0808722b7a8da5eb8dd569fa1022ca
|
feat: force final ln in encoder
|
<13>:<add> use_scale=self.config.force_ln_scale,
<del> use_scale=self.config.use_all_scale,
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["encoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8> if self.config.ln_positions in ["normformer", "cogview"]:
<9> hidden_states = norm(
<10> self.config.ln_type,
<11> dtype=self.dtype,
<12> epsilon=1e-05,
<13> use_scale=self.config.use_all_scale,
<14> )(hidden_states)
<15> hidden_states, attn_weights = FlaxBartAttention(
<16> config=self.config,
<17> embed_dim=embed_dim,
<18> num_heads=self.config.encoder_attention_heads,
<19> dropout=self.config.attention_dropout,
<20> bias=False,
<21> dtype=self.dtype,
<22> is_encoder=True,
<23> )(hidden_states=hidden_states, attention_mask=attention_mask)
<24>
<25> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<26> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<27> hidden_states
<28> )
<29> hidden_states = nn.Dropout(rate=self.config.dropout)(
<30> hidden_states, deterministic=deterministic
<31> )
<32> hidden_states = residual * res_gain + hidden_states
<33> if self.config.ln_positions in ["postln"]:
<34> </s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states
)
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["postln"]:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.use_all_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartEncoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = True
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
x = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = ACT2FN[self.config.activation_function](x)
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "c</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense</s>
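The `res_gain` used in the encoder layer above comes from the DeepNet residual-scaling rule; a small sketch of the update it implements, with the encoder-side alpha taken from the `deepnet_gain` table shown in the refs (plain function, names are illustrative):

# DeepNet-style residual update (encoder side), as applied in the layer above:
# hidden = residual * alpha + sublayer(hidden), with alpha derived from the layer counts.
def deepnet_residual(residual, sublayer_out, encoder_layers, decoder_layers,
                     use_deepnet_scaling=True):
    alpha = (
        0.81 * (encoder_layers**4 * decoder_layers) ** 0.0625
        if use_deepnet_scaling
        else 1.0
    )
    return residual * alpha + sublayer_out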
|
dalle_mini.model.modeling/FlaxBartDecoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
32f4ba551a0808722b7a8da5eb8dd569fa1022ca
|
feat: force final ln in encoder
|
<15>:<add> use_scale=self.config.force_ln_scale,
<del> use_scale=self.config.use_all_scale,
|
<s> FlaxBartDecoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["decoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8>
<9> # Self Attention
<10> if self.config.ln_positions in ["normformer", "cogview"]:
<11> hidden_states = norm(
<12> self.config.ln_type,
<13> dtype=self.dtype,
<14> epsilon=1e-05,
<15> use_scale=self.config.use_all_scale,
<16> )(hidden_states)
<17> hidden_states, attn_weights = FlaxBartAttention(
<18> config=self.config,
<19> embed_dim=embed_dim,
<20> num_heads=self.config.decoder_attention_heads,
<21> dropout=self.config.attention_dropout,
<22> causal=True,
<23> bias=False,
<24> dtype=self.dtype,
<25> is_encoder=False,
<26> )(
<27> hidden_states=hidden_states,
<28> attention_mask=attention_mask,
<29> init_cache=init_cache,
<30> )
<31>
<32> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<33> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<34> hidden_states
<35> )</s>
|
===========below chunk 0===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
# Cross Attention
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.use_all_scale,
)(hidden_states)
hidden_states, cross_attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
bias=False,
dtype=self.dtype,
is_encoder=False,
)(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-</s>
===========below chunk 1===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s>:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
# Feed forward
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["post</s>
===========below chunk 2===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.use_all_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights, cross_attn_weights)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartDecoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = False
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
|
dalle_mini.model.modeling/FlaxBartEncoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
32f4ba551a0808722b7a8da5eb8dd569fa1022ca
|
feat: force final ln in encoder
|
<16>:<add> add_norm = self.config.force_final_ln_encoder or (
<add> self.config.ln_positions == "swinv2"
<del> add_norm = self.config.ln_positions == "swinv2" and (
<17>:<add> and ((i == n_layers - 1) or ((i + 1) % 6 == 0))
<del> (i == n_layers - 1) or ((i + 1) % 6 == 0)
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> all_hidden_states = () if output_hidden_states else None
<1> all_self_attns = () if output_attentions else None
<2>
<3> n_layers = self.config.encoder_layers
<4> layer = (
<5> remat(FlaxBartEncoderLayer, static_argnums=(2, 3))
<6> if self.config.gradient_checkpointing
<7> else FlaxBartEncoderLayer
<8> )
<9> for i in range(n_layers):
<10> if output_hidden_states:
<11> all_hidden_states += (hidden_states,)
<12> # final layernorm on the output of the last layer
<13> # or every 6 layers for Swin v2
<14> # not needed for other models which use layernorm before x-attention
<15> # ignored args for deepnet which always add a norm with scale
<16> add_norm = self.config.ln_positions == "swinv2" and (
<17> (i == n_layers - 1) or ((i + 1) % 6 == 0)
<18> )
<19> # we don't need to scale the norm for the last layer
<20> use_scale = i != n_layers - 1
<21> layer_outputs = layer(
<22> self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale
<23> )(
<24> hidden_states,
<25> attention_mask,
<26> output_attentions,
<27> deterministic,
<28> )
<29> hidden_states = layer_outputs[0]
<30> if output_attentions:
<31> all_self_attns += (layer_outputs[1],)
<32>
<33> # add hidden states from the last layer</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
all_hidden_states += (hidden_states,)
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartEncoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutput(**kwargs: _VT)
FlaxBaseModelOutput(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
FlaxBaseModelOutput(map: Mapping[_KT, _VT], **kwargs: _VT)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
x = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = ACT2FN[self.config.activation_function](x)
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "c</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
+ use_scale=self.config.force_ln_scale,
- use_scale=self.config.use_all_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense</s>
===========changed ref 3===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
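After this commit, the per-layer decision to append an extra layer norm in the encoder stack is the new `force_final_ln_encoder` flag OR the Swin-v2 rule (last layer, or every sixth layer). A tiny restatement of that predicate, mirroring the `add_norm` logic above:

# Mirrors the add_norm decision in FlaxBartEncoderLayerCollection above (i is 0-based).
def needs_extra_norm(i, n_layers, ln_positions, force_final_ln_encoder):
    swinv2_rule = ln_positions == "swinv2" and (
        (i == n_layers - 1) or ((i + 1) % 6 == 0)
    )
    return force_final_ln_encoder or swinv2_rule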
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
69bcbeb80ce94cfda6acdb0fae3a3499d668fa20
|
feat: placeholders for more config
|
<18>:<add> assert use_alibi is False, "use_alibi is not supported yet"
<23>:<add> self.use_alibi = use_alibi
<add> self.sink_iters = sink_iters
|
<s> Transformer"
+ use_alibi=False, # from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
+ sink_iters=1, # used in SinkFormers
# parameters that should not be necessary but could affect results
force_ln_scale=True, # force scale in layernorm even when followed by dense layers
force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
**kwargs,
):
<0> # text normalizer
<1> self.normalize_text = normalize_text
<2>
<3> # transformer variants
<4> self.head_scale = head_scale # per Normformer
<5> assert ln_type in [
<6> "rmsnorm",
<7> "layernorm",
<8> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<9> self.ln_type = ln_type
<10> assert ln_positions in [
<11> "normformer",
<12> "swinv2",
<13> "cogview",
<14> "deepnet",
<15> ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
<16> if ln_positions == "deepnet":
<17> ln_positions = "postln"
<18> self.ln_positions = ln_positions
<19> self.use_cosine_attention = use_cosine_attention
<20> self.tau_init = tau_init
<21> self.use_deepnet_scaling = use_deepnet_scaling
<22> self.use_glu = use_glu
<23> self.force_ln_scale = force_ln_scale
<24> self.force_final_ln_encoder = force_final_ln_encoder
<25>
<26> # common parameters
<27> self.encoder_vocab_size = encoder_vocab_size
<28> self.image_vocab_size = image_vocab_size
<29> self.image_length = image_length
<30> self.max_text_length = max_text_length
<31> self.d_model = d_model
<32> self.encoder_ffn</s>
|
===========below chunk 0===========
<s> use_alibi=False, # from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
+ sink_iters=1, # used in SinkFormers
# parameters that should not be necessary but could affect results
force_ln_scale=True, # force scale in layernorm even when followed by dense layers
force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
**kwargs,
):
# offset: 1
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
</s>
===========below chunk 1===========
<s> use_alibi=False, # from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
+ sink_iters=1, # used in SinkFormers
# parameters that should not be necessary but could affect results
force_ln_scale=True, # force scale in layernorm even when followed by dense layers
force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
**kwargs,
):
# offset: 2
<s> super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: dalle_mini.model.configuration.DalleBartConfig.__init__
self.forced_bos_token_id = self.bos_token_id
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
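The new `use_alibi` flag only reserves the feature (it is asserted to be False), so nothing in the repo computes the bias yet. As an outside illustration of the cited paper's idea, ALiBi adds a head-specific linear penalty on query-key distance to the attention logits. A rough sketch, not the repo's implementation:

# Rough ALiBi sketch (outside illustration; assumes num_heads is a power of two).
import jax.numpy as jnp

def alibi_bias(num_heads, seq_len):
    # Per-head slopes form a geometric sequence ending at 2**-8.
    slopes = 2.0 ** (-8.0 * jnp.arange(1, num_heads + 1) / num_heads)
    # Relative position j - i; non-positive on and below the diagonal (the causal part).
    rel = jnp.arange(seq_len)[None, :] - jnp.arange(seq_len)[:, None]
    # Bias of shape (num_heads, seq_len, seq_len) to add to the attention logits.
    return slopes[:, None, None] * rel[None, :, :]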
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
f139b0be7c3b03f0520c106b12540b3fbd8e6e82
|
feat: add sinkformer + custom final ln + pre-ln (#151)
|
<4>:<add> self.use_head_scale = use_head_scale # per Normformer
<del> self.head_scale = head_scale # per Normformer
<10>:<add> if ln_positions == "deepnet":
<add> ln_positions = "postln"
<14>:<add> "postln",
<add> "preln",
<del> "deepnet",
<15>:<add> ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
<del> ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
<16>:<del> if ln_positions == "deepnet":
<17>:<del> ln_positions = "postln"
<25>:<add> self.sinkhorn_iters = sinkhorn_iters
<del> self.sink_iters = sink_iters
<26>:<add> if ln_positions == "postln":
<add> assert (
<add> use_final_ln_encoder
<add> ), "use_final_ln_encoder must be True when ln_positions is 'postln'"
<add> assert (
<add> use_final_ln_decoder
<add> ), "use_final_ln_decoder must be True when ln_positions is 'postln'"
<add> self.use_final_ln_encoder = use_final_ln_encoder
<add> self.
|
<s>, # final layer normalization in encoder
+ use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
+ force_ln_scale=False, # force scale in layernorm even when followed by dense layers
- force_ln_scale=True, # force scale in layernorm even when followed by dense layers
- force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
**kwargs,
):
<0> # text normalizer
<1> self.normalize_text = normalize_text
<2>
<3> # transformer variants
<4> self.head_scale = head_scale # per Normformer
<5> assert ln_type in [
<6> "rmsnorm",
<7> "layernorm",
<8> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<9> self.ln_type = ln_type
<10> assert ln_positions in [
<11> "normformer",
<12> "swinv2",
<13> "cogview",
<14> "deepnet",
<15> ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
<16> if ln_positions == "deepnet":
<17> ln_positions = "postln"
<18> assert use_alibi is False, "use_alibi is not supported yet"
<19> self.ln_positions = ln_positions
<20> self.use_cosine_attention = use_cosine_attention
<21> self.tau_init = tau_init
<22> self.use_deepnet_scaling = use_deepnet_scaling
<23> self.use_glu = use_glu
<24> self.use_alibi = use_alibi
<25> self.sink_iters = sink_iters
<26> self.force_ln_scale = force_ln_scale
<27> self.force_final_ln_encoder = force_final_ln_encoder
<28>
<29> # common parameters
<30> self.encoder_vocab_size = encoder_vocab_size
<31> self.image_vocab_size = image_vocab</s>
|
===========below chunk 0===========
<s> normalization in encoder
+ use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
+ force_ln_scale=False, # force scale in layernorm even when followed by dense layers
- force_ln_scale=True, # force scale in layernorm even when followed by dense layers
- force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
**kwargs,
):
# offset: 1
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1</s>
===========below chunk 1===========
<s> normalization in encoder
+ use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
+ force_ln_scale=False, # force scale in layernorm even when followed by dense layers
- force_ln_scale=True, # force scale in layernorm even when followed by dense layers
- force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
**kwargs,
):
# offset: 2
<s> image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
===========unchanged ref 0===========
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
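`sinkhorn_iters` refers to the SinkFormer idea of replacing the single row softmax with a few Sinkhorn iterations (alternating row and column normalization) over the attention matrix. An outside sketch of that normalization, not the repo's exact integration:

# Sinkhorn-style attention normalization (SinkFormer); n_iters=0 reduces to the usual softmax.
import jax.numpy as jnp

def sinkhorn_attention(logits, n_iters=1):
    # logits: (..., q_len, k_len)
    attn = jnp.exp(logits - logits.max(axis=-1, keepdims=True))
    attn = attn / attn.sum(axis=-1, keepdims=True)      # row normalization (softmax)
    for _ in range(n_iters):
        attn = attn / attn.sum(axis=-2, keepdims=True)  # column normalization
        attn = attn / attn.sum(axis=-1, keepdims=True)  # row normalization
    return attn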
|
dalle_mini.model.modeling/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
f139b0be7c3b03f0520c106b12540b3fbd8e6e82
|
feat: add sinkformer + custom final ln + pre-ln (#151)
|
<40>:<add> if self.config.use_head_scale:
<del> if
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> if self.head_dim * self.num_heads != self.embed_dim:
<2> raise ValueError(
<3> f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
<4> f" and `num_heads`: {self.num_heads})."
<5> )
<6>
<7> dense = partial(
<8> nn.Dense,
<9> self.embed_dim,
<10> use_bias=self.bias,
<11> dtype=self.dtype,
<12> )
<13>
<14> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<15> self.config
<16> )
<17>
<18> self.q_proj = dense(
<19> kernel_init=deepnet_init()
<20> if self.config.use_deepnet_scaling
<21> else jax.nn.initializers.normal(self.config.init_std)
<22> )
<23> self.k_proj = dense(
<24> kernel_init=deepnet_init()
<25> if self.config.use_deepnet_scaling
<26> else jax.nn.initializers.normal(self.config.init_std)
<27> )
<28> self.v_proj = dense(
<29> kernel_init=deepnet_init(gain)
<30> if self.config.use_deepnet_scaling
<31> else jax.nn.initializers.normal(self.config.init_std)
<32> )
<33> self.out_proj = dense(
<34> kernel_init=deepnet_init(gain)
<35> if self.config.use_deepnet_scaling
<36> else jax.nn.initializers.normal(self.config.init_std)
<37> )
<38> self.dropout_layer = nn.Dropout(rate=self.dropout)
<39>
<40> if</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
# offset: 1
self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
)
if self.config.use_cosine_attention:
self.tau = self.param(
"tau",
jax.nn.initializers.constant(self.config.tau_init),
(1, self.num_heads, 1, 1),
)
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
at: dalle_mini.model.modeling.FlaxBartAttention
is_encoder: bool = False
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self) -> None
===========changed ref 0===========
<s>, # final layer normalization in encoder
+ use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
+ force_ln_scale=False, # force scale in layernorm even when followed by dense layers
- force_ln_scale=True, # force scale in layernorm even when followed by dense layers
- force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
+ self.use_head_scale = use_head_scale # per Normformer
- self.head_scale = head_scale # per Normformer
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
+ if ln_positions == "deepnet":
+ ln_positions = "postln"
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
+ "postln",
+ "preln",
- "deepnet",
+ ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
- ], "ln_positions must be 'normformer', 'swinv2' or 'deepnet'"
- if ln_positions == "deepnet":
- ln_positions = "postln"
assert use_alibi is False, "use_alibi is not supported yet"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
self.use_alibi = use_alibi
+ self.sinkhorn_iters = sinkhorn_</s>
===========changed ref 1===========
<s> normalization in encoder
+ use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
+ force_ln_scale=False, # force scale in layernorm even when followed by dense layers
- force_ln_scale=True, # force scale in layernorm even when followed by dense layers
- force_final_ln_encoder=False, # force layer normalization in encoder final layer even when followed by dense layers
**kwargs,
):
# offset: 1
<s>
self.use_alibi = use_alibi
+ self.sinkhorn_iters = sinkhorn_iters
- self.sink_iters = sink_iters
+ if ln_positions == "postln":
+ assert (
+ use_final_ln_encoder
+ ), "use_final_ln_encoder must be True when ln_positions is 'postln'"
+ assert (
+ use_final_ln_decoder
+ ), "use_final_ln_decoder must be True when ln_positions is 'postln'"
+ self.use_final_ln_encoder = use_final_ln_encoder
+ self.use_final_ln_decoder = use_final_ln_decoder
self.force_ln_scale = force_ln_scale
- self.force_final_ln_encoder = force_final_ln_encoder
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers</s>
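`use_cosine_attention` together with the learned `tau` set up above implements cosine-similarity attention in the spirit of Swin v2: q and k are l2-normalized and a per-head temperature rescales the result, clamped at 0.01 as in the `__call__` of the next record. A self-contained sketch with illustrative shapes, not the repo's exact code path:

# Cosine-attention sketch: l2-normalize q/k, softmax, then rescale by a learned per-head
# temperature clamped at 0.01 (mirroring how tau is applied to the weights above).
import jax
import jax.numpy as jnp

def cosine_attention_weights(q, k, tau):
    # q, k: (batch, length, num_heads, head_dim); tau: (1, num_heads, 1, 1)
    q = q / (jnp.linalg.norm(q, axis=-1, keepdims=True) + 1e-8)
    k = k / (jnp.linalg.norm(k, axis=-1, keepdims=True) + 1e-8)
    logits = jnp.einsum("...qhd,...khd->...hqk", q, k)
    weights = jax.nn.softmax(logits, axis=-1)
    return weights / jnp.maximum(tau, 0.01)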
|
dalle_mini.model.modeling/FlaxBartAttention.__call__
|
Modified
|
borisdayma~dalle-mini
|
f139b0be7c3b03f0520c106b12540b3fbd8e6e82
|
feat: add sinkformer + custom final ln + pre-ln (#151)
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> """Input shape: Batch x Time x Channel"""
<1>
<2> # if key_value_states are provided this layer is used as a cross-attention layer
<3> # for the decoder
<4> is_cross_attention = key_value_states is not None
<5> batch_size = hidden_states.shape[0]
<6>
<7> # get query proj
<8> query_states = self.q_proj(hidden_states)
<9> # get key, value proj
<10> if is_cross_attention:
<11> # cross_attentions
<12> key_states = self.k_proj(key_value_states)
<13> value_states = self.v_proj(key_value_states)
<14> else:
<15> # self_attention
<16> key_states = self.k_proj(hidden_states)
<17> value_states = self.v_proj(hidden_states)
<18>
<19> query_states = self._split_heads(query_states)
<20> key_states = self._split_heads(key_states)
<21> value_states = self._split_heads(value_states)
<22>
<23> # handle cache prepare causal attention mask
<24> if self.causal:
<25> query_length, key_length = query_states.shape[1], key_states.shape[1]
<26> if self.has_variable("cache", "cached_key"):
<27> mask_shift = self.variables["cache"]["cache_index"]
<28> max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
<29> causal_mask = lax.dynamic_slice(
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> # Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
if self.config.use</s>
===========below chunk 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>ine_attention:
# divide by tau
attn_weights = attn_weights / jnp.maximum(self.tau, 0.01)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
if self.config.head_scale:
# per Normformer
attn_output = attn_output * self.head_scale
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
===========unchanged ref 0===========
at: dalle_mini.model.modeling.FlaxBartAttention.setup
self.q_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
dropout: float = 0.0
causal: bool = False
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
_split_heads(hidden_states)
_concatenate_to_cache(key, value, query, attention_mask)
__call__(self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray]=None, attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True) -> Tuple[jnp.ndarray]
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
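For completeness, the Normformer per-head scaling applied in the `__call__` above (`head_scale`, renamed `use_head_scale` in this commit) just multiplies each head's output by a learned scalar before the heads are merged and projected; a trivial sketch, illustration only:

# Normformer per-head scaling, as applied to attn_output above (illustration only).
import jax.numpy as jnp

def scale_heads(attn_output, head_scale):
    # attn_output: (batch, seq_len, num_heads, head_dim)
    # head_scale:  (1, 1, num_heads, 1), initialized to ones and learned.
    return attn_output * head_scale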
|