| path | type | project | commit_hash | commit_message | ground_truth | main_code | context |
|---|---|---|---|---|---|---|---|
| stringlengths 9-117 | stringclasses 2 values | stringclasses 10 values | stringlengths 40-40 | stringlengths 1-137 | stringlengths 0-2.74k | stringlengths 102-3.37k | stringlengths 0-14.7k |
dalle_mini.model.modeling/GLU.__call__
|
Modified
|
borisdayma~dalle-mini
|
f139b0be7c3b03f0520c106b12540b3fbd8e6e82
|
feat: add sinkformer + custom final ln + pre-ln (#151)
|
<4>:<add> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<del> if self.config.ln_positions in ["normformer", "cogview"]:
|
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3>
<4> if self.config.ln_positions in ["normformer", "cogview"]:
<5> x = norm(
<6> self.config.ln_type,
<7> dtype=self.dtype,
<8> epsilon=1e-05,
<9> use_scale=self.config.force_ln_scale,
<10> )(x)
<11> w = nn.Dense(
<12> self.ffn_dim,
<13> dtype=self.dtype,
<14> use_bias=False,
<15> kernel_init=deepnet_init(gain)
<16> if self.config.use_deepnet_scaling
<17> else jax.nn.initializers.normal(self.config.init_std),
<18> )(x)
<19> w = ACT2FN[self.config.activation_function](w)
<20> v = nn.Dense(
<21> self.ffn_dim,
<22> dtype=self.dtype,
<23> use_bias=False,
<24> kernel_init=deepnet_init(gain)
<25> if self.config.use_deepnet_scaling
<26> else jax.nn.initializers.normal(self.config.init_std),
<27> )(x)
<28> x = w * v
<29> if self.config.ln_positions in ["normformer"]:
<30> x = norm(
<31> self.config.ln_type,
<32> dtype=self.dtype,
<33> epsilon=1e-05,
<34> use_scale=self.config.force_ln_scale,
<35> )(x)
<36> x = nn.Dropout(rate=self.config.activation_dropout)(
<37> x, deterministic=deterministic
<38> )
<39>
<40> x</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
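
As context for the record above: the GLU feed-forward gates an activated projection with a second linear projection before mapping back to the embedding dimension. A stripped-down Flax sketch of just that pattern, with placeholder sizes and the norms, dropout and deepnet-scaled init omitted (TinyGLU is a hypothetical name, not the module above):

import flax.linen as nn
import jax
import jax.numpy as jnp

class TinyGLU(nn.Module):
    ffn_dim: int = 64    # placeholder hidden width
    embed_dim: int = 32  # placeholder model dim

    @nn.compact
    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        w = nn.gelu(nn.Dense(self.ffn_dim, use_bias=False)(x))  # activated branch
        v = nn.Dense(self.ffn_dim, use_bias=False)(x)           # gating branch
        return nn.Dense(self.embed_dim, use_bias=False)(w * v)  # project back to embed_dim

x = jnp.ones((1, 8, 32))
params = TinyGLU().init(jax.random.PRNGKey(0), x)
y = TinyGLU().apply(params, x)  # same shape as x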
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.GLU
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
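
The deepnet_gain lambdas in this reference collapse to plain scalars once layer counts are fixed; alpha scales the residual branch and beta scales the weight init. A quick evaluation with hypothetical layer counts, only to show the magnitudes involved:

# hypothetical 12-layer encoder / 12-layer decoder
encoder_layers, decoder_layers = 12, 12

encoder_alpha = 0.81 * (encoder_layers**4 * decoder_layers) ** 0.0625
encoder_beta = 0.87 * (encoder_layers**4 * decoder_layers) ** -0.0625
decoder_alpha = (3 * decoder_layers) ** 0.25
decoder_beta = (12 * decoder_layers) ** -0.25

print(encoder_alpha, encoder_beta)  # roughly 1.76 and 0.40
print(decoder_alpha, decoder_beta)  # roughly 2.45 and 0.29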
===========changed ref 0===========
<s> query: Any,
+ key: Any,
+ bias: Optional[Any] = None,
+ mask: Optional[Any] = None,
+ broadcast_dropout: bool = True,
+ dropout_rng: Optional[PRNGKey] = None,
+ dropout_rate: float = 0.0,
+ deterministic: bool = False,
+ dtype: Any = jnp.float32,
+ precision: PrecisionLike = None,
+ sinkhorn_iters: int = 1,
+ ):
+ """
+ Computes dot-product attention weights given query and key.
+
+ Adapted from flax.linen.attention.dot_product_attention_weights
+ """
+ assert query.ndim == key.ndim, "q, k must have same rank."
+ assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
+ assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
+ assert query.shape[-1] == key.shape[-1], "q, k depths must match."
+
+ # calculate attention matrix
+ depth = query.shape[-1]
+ query = query / jnp.sqrt(depth).astype(dtype)
+ # attn weight shape is (batch..., num_heads, q_length, kv_length)
+ attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
+
+ # apply attention bias: masking, dropout, proximity bias, etc.
+ if bias is not None:
+ attn_weights = attn_weights + bias
+ # apply attention mask
+ if mask is not None:
+ big_neg = jnp.finfo(dtype).min
+ attn_weights = jnp.where(mask, attn_weights, big_neg)
+
+ # normalize the attention weights
+ attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
</s>
===========changed ref 1===========
<s>
+ key: Any,
+ bias: Optional[Any] = None,
+ mask: Optional[Any] = None,
+ broadcast_dropout: bool = True,
+ dropout_rng: Optional[PRNGKey] = None,
+ dropout_rate: float = 0.0,
+ deterministic: bool = False,
+ dtype: Any = jnp.float32,
+ precision: PrecisionLike = None,
+ sinkhorn_iters: int = 1,
+ ):
# offset: 1
<s> normalize the attention weights
+ attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
+ for i in range(sinkhorn_iters - 1):
+ axis = -2 if i % 2 == 0 else -1
+ attn_weights /= 1e-8 + jnp.sum(attn_weights, axis=axis, keepdims=True)
+
+ # apply attention dropout
+ if not deterministic and dropout_rate > 0.0:
+ keep_prob = 1.0 - dropout_rate
+ if broadcast_dropout:
+ # dropout is broadcast across the batch + head dimensions
+ dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
+ keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
+ else:
+ keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
+ multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
+ keep_prob, dtype=dtype
+ )
+ attn_weights = attn_weights * multiplier
+
+ return attn_weights
+
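
The sinkformer change in this chunk replaces a single softmax with alternating renormalizations over the query and key axes (Sinkhorn iterations). The same loop in isolation, outside the attention module, with illustrative shapes and iteration count:

import jax
import jax.numpy as jnp

def sinkhorn_normalize(logits: jnp.ndarray, iters: int = 3) -> jnp.ndarray:
    # first pass: the usual softmax over the key axis
    weights = jax.nn.softmax(logits, axis=-1)
    for i in range(iters - 1):
        # even iterations renormalize over queries (axis -2), odd ones over keys (axis -1)
        axis = -2 if i % 2 == 0 else -1
        weights = weights / (1e-8 + jnp.sum(weights, axis=axis, keepdims=True))
    return weights

# toy example: 1 head, 4 queries, 4 keys
w = sinkhorn_normalize(jax.random.normal(jax.random.PRNGKey(0), (1, 4, 4)), iters=3)

With iters=1 the loop body never runs and this reduces to the plain softmax, matching the sinkhorn_iters=1 default.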
|
dalle_mini.model.modeling/FFN.__call__
|
Modified
|
borisdayma~dalle-mini
|
f139b0be7c3b03f0520c106b12540b3fbd8e6e82
|
feat: add sinkformer + custom final ln + pre-ln (#151)
|
<3>:<add> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<del> if self.config.ln_positions in ["normformer", "cogview"]:
|
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3> if self.config.ln_positions in ["normformer", "cogview"]:
<4> x = norm(
<5> self.config.ln_type,
<6> dtype=self.dtype,
<7> epsilon=1e-05,
<8> use_scale=self.config.force_ln_scale,
<9> )(x)
<10> x = nn.Dense(
<11> self.ffn_dim,
<12> dtype=self.dtype,
<13> use_bias=False,
<14> kernel_init=deepnet_init(gain)
<15> if self.config.use_deepnet_scaling
<16> else jax.nn.initializers.normal(self.config.init_std),
<17> )(x)
<18> x = ACT2FN[self.config.activation_function](x)
<19> if self.config.ln_positions in ["normformer"]:
<20> x = norm(
<21> self.config.ln_type,
<22> dtype=self.dtype,
<23> epsilon=1e-05,
<24> use_scale=self.config.force_ln_scale,
<25> )(x)
<26> x = nn.Dropout(rate=self.config.activation_dropout)(
<27> x, deterministic=deterministic
<28> )
<29> x = nn.Dense(
<30> self.embed_dim,
<31> dtype=self.dtype,
<32> use_bias=False,
<33> kernel_init=deepnet_init(gain)
<34> if self.config.use_deepnet_scaling
<35> else jax.nn.initializers.normal(self.config.init_std),
<36> )(x)
<37> if self.config.ln_positions in ["swinv2",</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FFN
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
+ if self.config.ln_positions in ["normformer", "cogview", "preln"]:
- if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
<s> query: Any,
+ key: Any,
+ bias: Optional[Any] = None,
+ mask: Optional[Any] = None,
+ broadcast_dropout: bool = True,
+ dropout_rng: Optional[PRNGKey] = None,
+ dropout_rate: float = 0.0,
+ deterministic: bool = False,
+ dtype: Any = jnp.float32,
+ precision: PrecisionLike = None,
+ sinkhorn_iters: int = 1,
+ ):
+ """
+ Computes dot-product attention weights given query and key.
+
+ Adapted from flax.linen.attention.dot_product_attention_weights
+ """
+ assert query.ndim == key.ndim, "q, k must have same rank."
+ assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
+ assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
+ assert query.shape[-1] == key.shape[-1], "q, k depths must match."
+
+ # calculate attention matrix
+ depth = query.shape[-1]
+ query = query / jnp.sqrt(depth).astype(dtype)
+ # attn weight shape is (batch..., num_heads, q_length, kv_length)
+ attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
+
+ # apply attention bias: masking, dropout, proximity bias, etc.
+ if bias is not None:
+ attn_weights = attn_weights + bias
+ # apply attention mask
+ if mask is not None:
+ big_neg = jnp.finfo(dtype).min
+ attn_weights = jnp.where(mask, attn_weights, big_neg)
+
+ # normalize the attention weights
+ attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
</s>
|
dalle_mini.model.modeling/FlaxBartEncoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
f139b0be7c3b03f0520c106b12540b3fbd8e6e82
|
feat: add sinkformer + custom final ln + pre-ln (#151)
|
<8>:<add> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<del> if self.config.ln_positions in ["normformer", "cogview"]:
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["encoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8> if self.config.ln_positions in ["normformer", "cogview"]:
<9> hidden_states = norm(
<10> self.config.ln_type,
<11> dtype=self.dtype,
<12> epsilon=1e-05,
<13> use_scale=self.config.force_ln_scale,
<14> )(hidden_states)
<15> hidden_states, attn_weights = FlaxBartAttention(
<16> config=self.config,
<17> embed_dim=embed_dim,
<18> num_heads=self.config.encoder_attention_heads,
<19> dropout=self.config.attention_dropout,
<20> bias=False,
<21> dtype=self.dtype,
<22> is_encoder=True,
<23> )(hidden_states=hidden_states, attention_mask=attention_mask)
<24>
<25> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<26> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<27> hidden_states
<28> )
<29> hidden_states = nn.Dropout(rate=self.config.dropout)(
<30> hidden_states, deterministic=deterministic
<31> )
<32> hidden_states = residual * res_gain + hidden_states
<33> if self.config.ln_positions in ["postln"]:
<34> </s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states
)
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["postln"]:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.force_ln_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
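
Across the branches above, ln_positions mainly decides where the layer norm sits relative to the residual sum. A schematic sketch of the two extremes used in this file, with a toy sublayer standing in for attention or the feed-forward block (this is an illustration, not the module code):

import jax.numpy as jnp

def layer_norm(x, eps=1e-5):
    # scale-free layer norm, enough to illustrate placement
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return (x - mean) / jnp.sqrt(var + eps)

def preln_block(x, sublayer, res_gain=1.0):
    # "preln" / "normformer" / "cogview": normalize before the sublayer
    return x * res_gain + sublayer(layer_norm(x))

def postln_block(x, sublayer, res_gain=1.0):
    # "postln" (deepnet): add the residual first, then normalize the sum
    return layer_norm(x * res_gain + sublayer(x))

x = jnp.ones((2, 4, 8))
y = preln_block(x, lambda h: 0.5 * h)  # toy sublayer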
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartEncoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = True
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
+ if self.config.ln_positions in ["normformer", "cogview", "preln"]:
- if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = ACT2FN[self.config.activation_function](x)
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
+ if self.config.ln_positions in ["normformer", "cogview", "preln"]:
- if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,</s>
|
dalle_mini.model.modeling/FlaxBartDecoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
f139b0be7c3b03f0520c106b12540b3fbd8e6e82
|
feat: add sinkformer + custom final ln + pre-ln (#151)
|
<10>:<add> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<del> if self.config.ln_positions in ["normformer", "cogview"]:
|
<s> FlaxBartDecoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["decoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8>
<9> # Self Attention
<10> if self.config.ln_positions in ["normformer", "cogview"]:
<11> hidden_states = norm(
<12> self.config.ln_type,
<13> dtype=self.dtype,
<14> epsilon=1e-05,
<15> use_scale=self.config.force_ln_scale,
<16> )(hidden_states)
<17> hidden_states, attn_weights = FlaxBartAttention(
<18> config=self.config,
<19> embed_dim=embed_dim,
<20> num_heads=self.config.decoder_attention_heads,
<21> dropout=self.config.attention_dropout,
<22> causal=True,
<23> bias=False,
<24> dtype=self.dtype,
<25> is_encoder=False,
<26> )(
<27> hidden_states=hidden_states,
<28> attention_mask=attention_mask,
<29> init_cache=init_cache,
<30> )
<31>
<32> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<33> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<34> hidden_states
<35> )</s>
|
===========below chunk 0===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
# Cross Attention
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
hidden_states, cross_attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
bias=False,
dtype=self.dtype,
is_encoder=False,
)(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-</s>
===========below chunk 1===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s>:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
# Feed forward
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["post</s>
===========below chunk 2===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.force_ln_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights, cross_attn_weights)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartDecoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = False
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
|
dalle_mini.model.modeling/FlaxBartEncoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
f139b0be7c3b03f0520c106b12540b3fbd8e6e82
|
feat: add sinkformer + custom final ln + pre-ln (#151)
|
<14>:<del> # not needed for other models which use layernorm before x-attention
<15>:<del> # ignored args for deepnet which always add a norm with scale
<16>:<del> add_norm = self.config.force_final_ln_encoder or (
<17>:<add> add_norm = (
<add> self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0)
<del> self.config.ln_positions == "swinv2"
<18>:<del> and ((i == n_layers - 1) or ((i + 1) % 6 == 0))
<19>:<del> )
<20>:<add> ) or (self.config.use_final_ln_encoder and (i == n_layers - 1))
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> all_hidden_states = () if output_hidden_states else None
<1> all_self_attns = () if output_attentions else None
<2>
<3> n_layers = self.config.encoder_layers
<4> layer = (
<5> remat(FlaxBartEncoderLayer, static_argnums=(2, 3))
<6> if self.config.gradient_checkpointing
<7> else FlaxBartEncoderLayer
<8> )
<9> for i in range(n_layers):
<10> if output_hidden_states:
<11> all_hidden_states += (hidden_states,)
<12> # final layernorm on the output of the last layer
<13> # or every 6 layers for Swin v2
<14> # not needed for other models which use layernorm before x-attention
<15> # ignored args for deepnet which always add a norm with scale
<16> add_norm = self.config.force_final_ln_encoder or (
<17> self.config.ln_positions == "swinv2"
<18> and ((i == n_layers - 1) or ((i + 1) % 6 == 0))
<19> )
<20> # we don't need to scale the norm for the last layer
<21> use_scale = i != n_layers - 1
<22> layer_outputs = layer(
<23> self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale
<24> )(
<25> hidden_states,
<26> attention_mask,
<27> output_attentions,
<28> deterministic,
<29> )
<30> hidden_states = layer_outputs[0]
<31> if output_attentions:
<32> all_self_attns += (layer_outputs[1</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
# add hidden states from the last layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
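
The rewritten add_norm condition decouples the Swin v2 periodic norm from the final norm, which is now gated by use_final_ln_encoder. A small loop showing which layers it selects under an illustrative config:

# illustrative config: 12 encoder layers, swinv2 placement, final LN enabled
n_layers = 12
ln_positions = "swinv2"
use_final_ln_encoder = True

for i in range(n_layers):
    add_norm = (ln_positions == "swinv2" and (i + 1) % 6 == 0) or (
        use_final_ln_encoder and i == n_layers - 1
    )
    if add_norm:
        print(i)  # prints 5 and 11: every 6th layer, plus the last one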
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartEncoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutput(**kwargs: _VT)
FlaxBaseModelOutput(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
FlaxBaseModelOutput(map: Mapping[_KT, _VT], **kwargs: _VT)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
+ if self.config.ln_positions in ["normformer", "cogview", "preln"]:
- if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = ACT2FN[self.config.activation_function](x)
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
+ if self.config.ln_positions in ["normformer", "cogview", "preln"]:
- if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,</s>
===========changed ref 3===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
|
dalle_mini.model.modeling/FlaxBartDecoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
f139b0be7c3b03f0520c106b12540b3fbd8e6e82
|
feat: add sinkformer + custom final ln + pre-ln (#151)
|
<18>:<add> add_norm = (
<del> add_norm = (i == n_layers - 1) or (
<19>:<add> self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0)
<del> (self.config.ln_positions == "swinv2") and ((i + 1) % 6 == 0)
<20>:<add> ) or (self.config.use_final_ln_decoder and (i == n_layers - 1))
<del> )
|
<s>Collection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> # decoder layers
<1> all_hidden_states = () if output_hidden_states else None
<2> all_self_attns = () if output_attentions else None
<3> all_cross_attentions = (
<4> () if (output_attentions and encoder_hidden_states is not None) else None
<5> )
<6>
<7> n_layers = self.config.decoder_layers
<8> layer = (
<9> remat(FlaxBartDecoderLayer, static_argnums=(4, 5, 6))
<10> if self.config.gradient_checkpointing
<11> else FlaxBartDecoderLayer
<12> )
<13> for i in range(n_layers):
<14> if output_hidden_states:
<15> all_hidden_states += (hidden_states,)
<16> # final layernorm on the output of the last layer
<17> # or every 6 layers for Swin v2
<18> add_norm = (i == n_layers - 1) or (
<19> (self.config.ln_positions == "swinv2") and ((i + 1) % 6 == 0)
<20> )
<21> # we don't need to scale the norm for the last layer
<22> use_scale = i != n_layers - 1
<23> layer_outputs = layer(
<24> self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale
<25> )(
<26> hidden_states,
<27> attention_mask,
<28> encoder_hidden_states,
<29> encoder_attention_mask,
<30> init_cache,
<31> output_att</s>
|
===========below chunk 0===========
<s>):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
all_cross_attentions,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartDecoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutputWithPastAndCrossAttentions(**kwargs: _VT)
FlaxBaseModelOutputWithPastAndCrossAttentions(map: Mapping[_KT, _VT], **kwargs: _VT)
FlaxBaseModelOutputWithPastAndCrossAttentions(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
n_layers = self.config.encoder_layers
layer = (
remat(FlaxBartEncoderLayer, static_argnums=(2, 3))
if self.config.gradient_checkpointing
else FlaxBartEncoderLayer
)
for i in range(n_layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# final layernorm on the output of the last layer
# or every 6 layers for Swin v2
- # not needed for other models which use layernorm before x-attention
- # ignored args for deepnet which always add a norm with scale
- add_norm = self.config.force_final_ln_encoder or (
+ add_norm = (
+ self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0)
- self.config.ln_positions == "swinv2"
- and ((i == n_layers - 1) or ((i + 1) % 6 == 0))
- )
+ ) or (self.config.use_final_ln_encoder and (i == n_layers - 1))
# we don't need to scale the norm for the last layer
use_scale = i != n_layers - 1
layer_outputs = layer(
self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale
)(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
<s>
)(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
# add hidden states from the last layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
+ if self.config.ln_positions in ["normformer", "cogview", "preln"]:
- if self.config.ln_positions in ["normformer", "cogview"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = ACT2FN[self.config.activation_function](x)
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm</s>
|
tools.train.train/ModelArguments.get_opt_state
|
Modified
|
borisdayma~dalle-mini
|
02b2308f9fdfc64ab539761175f346b428d41ff7
|
feat(train): google-cloud-storage is optional
|
<20>:<add> assert (
<add> storage is not None
<add> ), 'Could not find google.storage. Install with "pip install google-cloud-storage"'
|
# module: tools.train.train
@dataclass
class ModelArguments:
def get_opt_state(self):
<0> with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
<1> if self.restore_state is True:
<2> # wandb artifact
<3> state_artifact = self.model_name_or_path.replace(
<4> "/model-", "/state-", 1
<5> )
<6> if jax.process_index() == 0:
<7> artifact = wandb.run.use_artifact(state_artifact)
<8> else:
<9> artifact = wandb.Api().artifact(state_artifact)
<10> if artifact.metadata.get("bucket_path"):
<11> # we will read directly file contents
<12> self.restore_state = artifact.metadata["bucket_path"]
<13> else:
<14> artifact_dir = artifact.download(tmp_dir)
<15> self.restore_state = str(Path(artifact_dir) / "opt_state.msgpack")
<16>
<17> if self.restore_state.startswith("gs://"):
<18> bucket_path = Path(self.restore_state[5:]) / "opt_state.msgpack"
<19> bucket, blob_name = str(bucket_path).split("/", 1)
<20> client = storage.Client()
<21> bucket = client.bucket(bucket)
<22> blob = bucket.blob(blob_name)
<23> return blob.download_as_bytes()
<24>
<25> with Path(self.restore_state).open("rb") as f:
<26> return f.read()
<27>
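
For reference, the gs:// branch above splits the bucket name from the blob path before asking the client for the blob. The same split in isolation, with a made-up bucket path:

from pathlib import Path

restore_state = "gs://my-bucket/checkpoints/run-1"  # hypothetical value
bucket_path = Path(restore_state[5:]) / "opt_state.msgpack"
bucket_name, blob_name = str(bucket_path).split("/", 1)
print(bucket_name)  # my-bucket
print(blob_name)    # checkpoints/run-1/opt_state.msgpack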
|
===========unchanged ref 0===========
at: pathlib
Path()
at: tempfile
TemporaryDirectory(suffix: Optional[AnyStr]=..., prefix: Optional[AnyStr]=..., dir: Optional[_DirT[AnyStr]]=...)
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path."
},
)
at: tools.train.train.ModelArguments.get_metadata
artifact = wandb.run.use_artifact(self.model_name_or_path)
artifact = wandb.Api().artifact(self.model_name_or_path)
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
===========unchanged ref 1===========
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
===========unchanged ref 2===========
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
===========unchanged ref 3===========
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
use_artifact(self, artifact_or_name: Union[str, public.Artifact, Artifact], type: Optional[str]=None, aliases: Optional[List[str]]=None, use_as: Optional[str]=None) -> Union[public.Artifact, Artifact]
===========changed ref 0===========
# module: tools.train.train
+ try:
+ from google.cloud import storage
+ except:
+ storage = None
+
cc.initialize_cache("./jax_cache", max_cache_size_bytes=10 * 2**30)
logger = logging.getLogger(__name__)
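
This guard makes google-cloud-storage a soft dependency: the failed import leaves a None sentinel, and the asserts in the training code only fire when a gs:// path is actually used. The pattern on its own, as a sketch (gcs_client is a hypothetical helper):

try:
    from google.cloud import storage
except ImportError:
    storage = None

def gcs_client():
    # fail loudly only when the optional feature is exercised
    assert storage is not None, (
        "google-cloud-storage is required for gs:// paths: pip install google-cloud-storage"
    )
    return storage.Client()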
|
tools.train.train/TrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
02b2308f9fdfc64ab539761175f346b428d41ff7
|
feat(train): google-cloud-storage is optional
|
<4>:<add> if self.output_dir.startswith("gs://"):
<add> assert (
<add> storage is not None
<add> ), 'Could not find google.storage. Install with "pip install google-cloud-storage"'
|
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
<0> if self.assert_TPU_available:
<1> assert (
<2> jax.local_device_count() == 8
<3> ), "TPUs in use, please check running processes"
<4> assert self.optim in [
<5> "distributed_shampoo",
<6> "adam",
<7> "adafactor",
<8> ], f"Selected optimizer not supported: {self.optim}"
<9> assert self.graft_type in [
<10> "rmsprop_normalized",
<11> "rmsprop",
<12> "adagrad",
<13> "adagrad_normalized",
<14> "sgd",
<15> "sqrt_n",
<16> ], f"Selected graft type not supported: {self.graft_type}"
<17> assert self.lr_decay in [
<18> None,
<19> "linear",
<20> "exponential",
<21> ], f"Selected learning rate decay not supported: {self.lr_decay}"
<22> if self.per_device_eval_batch_size is None:
<23> self.per_device_eval_batch_size = self.per_device_train_batch_size
<24> if self.log_norm_steps is True:
<25> self.log_norm_steps = self.logging_steps
<26> if (
<27> os.path.exists(self.output_dir)
<28> and os.listdir(self.output_dir)
<29> and self.do_train
<30> and not self.overwrite_output_dir
<31> ):
<32> raise ValueError(
<33> f"Output directory ({self.output_dir}) already exists and is not empty."
<34> "Use --overwrite_output_dir to overcome."
<35> )
<36> assert (
<37> self.mp_devices > 0
<38> ), f"Number of devices for model parallelism must be > 0"
<39> assert (
<40> jax.device_count() % self.mp_devices == 0
<41> ), f"Number of available devices ({jax.device_</s>
|
===========below chunk 0===========
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
# offset: 1
self.dp_devices = jax.device_count() // self.mp_devices
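
The closing line derives the data-parallel dimension from whatever remains after model parallelism. A worked example with illustrative numbers:

device_count = 8   # e.g. a single TPU v3-8 host
mp_devices = 2     # model-parallel shards requested
assert device_count % mp_devices == 0, "device count must be a multiple of mp_devices"
dp_devices = device_count // mp_devices  # 4 data-parallel replicas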
===========unchanged ref 0===========
at: os
listdir(path: bytes) -> List[bytes]
listdir(path: int) -> List[str]
listdir(path: Optional[str]=...) -> List[str]
listdir(path: _PathLike[str]) -> List[str]
at: os.path
exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool
===========changed ref 0===========
# module: tools.train.train
+ try:
+ from google.cloud import storage
+ except:
+ storage = None
+
cc.initialize_cache("./jax_cache", max_cache_size_bytes=10 * 2**30)
logger = logging.getLogger(__name__)
===========changed ref 1===========
# module: tools.train.train
@dataclass
class ModelArguments:
def get_opt_state(self):
with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies
if self.restore_state is True:
# wandb artifact
state_artifact = self.model_name_or_path.replace(
"/model-", "/state-", 1
)
if jax.process_index() == 0:
artifact = wandb.run.use_artifact(state_artifact)
else:
artifact = wandb.Api().artifact(state_artifact)
if artifact.metadata.get("bucket_path"):
# we will read directly file contents
self.restore_state = artifact.metadata["bucket_path"]
else:
artifact_dir = artifact.download(tmp_dir)
self.restore_state = str(Path(artifact_dir) / "opt_state.msgpack")
if self.restore_state.startswith("gs://"):
bucket_path = Path(self.restore_state[5:]) / "opt_state.msgpack"
bucket, blob_name = str(bucket_path).split("/", 1)
+ assert (
+ storage is not None
+ ), 'Could not find google.storage. Install with "pip install google-cloud-storage"'
client = storage.Client()
bucket = client.bucket(bucket)
blob = bucket.blob(blob_name)
return blob.download_as_bytes()
with Path(self.restore_state).open("rb") as f:
return f.read()
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
361a994d86d90f758a35b8f94b5588e58b7a52e2
|
feat(model): allow bias (#152)
|
<4>:<add> self.use_bias = use_bias
<del> self.use_head_scale = use_head_scale # per Normformer
<19>:<add> self.use_head_scale = use_head_scale
|
<s>, # from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=False, # final layer normalization in encoder
use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
<0> # text normalizer
<1> self.normalize_text = normalize_text
<2>
<3> # transformer variants
<4> self.use_head_scale = use_head_scale # per Normformer
<5> assert ln_type in [
<6> "rmsnorm",
<7> "layernorm",
<8> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<9> self.ln_type = ln_type
<10> if ln_positions == "deepnet":
<11> ln_positions = "postln"
<12> assert ln_positions in [
<13> "normformer",
<14> "swinv2",
<15> "cogview",
<16> "postln",
<17> "preln",
<18> ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
<19> assert use_alibi is False, "use_alibi is not supported yet"
<20> self.ln_positions = ln_positions
<21> self.use_cosine_attention = use_cosine_attention
<22> self.tau_init = tau_init
<23> self.use_deepnet_scaling = use_deepnet_scaling
<24> self.use_glu = use_glu
<25> self.use_alibi = use_alibi
<26> self.sinkhorn_iters = sinkhorn_iters
<27> if ln_positions == "postln":
<28> assert (
<29> use_final_ln_encoder
<30> ), "use_final_ln_encoder must be True when ln_positions is 'postln</s>
|
===========below chunk 0===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=False, # final layer normalization in encoder
use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 1
assert (
use_final_ln_decoder
), "use_final_ln_decoder must be True when ln_positions is 'postln'"
self.use_final_ln_encoder = use_final_ln_encoder
self.use_final_ln_decoder = use_final_ln_decoder
self.force_ln_scale = force_ln_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("</s>
===========below chunk 1===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=False, # final layer normalization in encoder
use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 2
<s> )
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure</s>
===========below chunk 2===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=False, # final layer normalization in encoder
use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 3
<s> for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
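
The postln branch above simply couples the flags: a post-LN stack must also end with final layer norms on both encoder and decoder. Restated with hypothetical flag values:

ln_positions = "postln"          # hypothetical flags
use_final_ln_encoder = True
use_final_ln_decoder = True

if ln_positions == "postln":
    assert use_final_ln_encoder, "use_final_ln_encoder must be True when ln_positions is 'postln'"
    assert use_final_ln_decoder, "use_final_ln_decoder must be True when ln_positions is 'postln'"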
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
===========changed ref 0===========
# module: dalle_mini
+ __version__ = "0.0.4"
- __version__ = "0.0.3"
|
dalle_mini.model.modeling/GLU.__call__
|
Modified
|
borisdayma~dalle-mini
|
361a994d86d90f758a35b8f94b5588e58b7a52e2
|
feat(model): allow bias (#152)
|
<14>:<add> use_bias=self.config.use_bias,
<del> use_bias=False,
<23>:<add> use_bias=self.config.use_bias,
<del> use_bias=False,
|
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3>
<4> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<5> x = norm(
<6> self.config.ln_type,
<7> dtype=self.dtype,
<8> epsilon=1e-05,
<9> use_scale=self.config.force_ln_scale,
<10> )(x)
<11> w = nn.Dense(
<12> self.ffn_dim,
<13> dtype=self.dtype,
<14> use_bias=False,
<15> kernel_init=deepnet_init(gain)
<16> if self.config.use_deepnet_scaling
<17> else jax.nn.initializers.normal(self.config.init_std),
<18> )(x)
<19> w = ACT2FN[self.config.activation_function](w)
<20> v = nn.Dense(
<21> self.ffn_dim,
<22> dtype=self.dtype,
<23> use_bias=False,
<24> kernel_init=deepnet_init(gain)
<25> if self.config.use_deepnet_scaling
<26> else jax.nn.initializers.normal(self.config.init_std),
<27> )(x)
<28> x = w * v
<29> if self.config.ln_positions in ["normformer"]:
<30> x = norm(
<31> self.config.ln_type,
<32> dtype=self.dtype,
<33> epsilon=1e-05,
<34> use_scale=self.config.force_ln_scale,
<35> )(x)
<36> x = nn.Dropout(rate=self.config.activation_dropout)(
<37> x, deterministic=deterministic
<38> )
<39> </s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
self.embed_dim,
dtype=self.dtype,
use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.GLU
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
# module: dalle_mini
+ __version__ = "0.0.4"
- __version__ = "0.0.3"
===========changed ref 1===========
<s>, # from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=False, # final layer normalization in encoder
use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
+ self.use_bias = use_bias
- self.use_head_scale = use_head_scale # per Normformer
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
if ln_positions == "deepnet":
ln_positions = "postln"
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
"postln",
"preln",
], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
+ self.use_head_scale = use_head_scale
assert use_alibi is False, "use_alibi is not supported yet"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
self.use_alibi = use_alibi
self.sinkhorn_iters = sinkhorn_iters
if ln_positions == "postln":
assert (
use_final_ln_encoder
), "use_final_ln_encoder must be True when ln_positions is 'postln'"
</s>
===========changed ref 2===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=False, # final layer normalization in encoder
use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 1
<s>final_ln_encoder
), "use_final_ln_encoder must be True when ln_positions is 'postln'"
assert (
use_final_ln_decoder
), "use_final_ln_decoder must be True when ln_positions is 'postln'"
self.use_final_ln_encoder = use_final_ln_encoder
self.use_final_ln_decoder = use_final_ln_decoder
self.force_ln_scale = force_ln_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient</s>
|
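The GLU change above only threads a use_bias flag through the dense layers. As a reference point, here is a minimal jax.numpy sketch of the gated feed-forward computation itself (gate branch, value branch, output projection) with the bias made switchable; parameter names and sizes are illustrative, and the LayerNorm/dropout placements of the real module are omitted:

import jax
import jax.numpy as jnp

def glu_ffn(params, x, use_bias=True):
    w = x @ params["w1"]            # gate branch
    v = x @ params["w2"]            # value branch
    if use_bias:
        w = w + params["b1"]
        v = v + params["b2"]
    h = jax.nn.gelu(w) * v          # gated hidden state
    out = h @ params["w_out"]
    return out + params["b_out"] if use_bias else out

key = jax.random.PRNGKey(0)
d_model, ffn_dim = 8, 32
k1, k2, k3 = jax.random.split(key, 3)
params = {
    "w1": 0.02 * jax.random.normal(k1, (d_model, ffn_dim)),
    "w2": 0.02 * jax.random.normal(k2, (d_model, ffn_dim)),
    "w_out": 0.02 * jax.random.normal(k3, (ffn_dim, d_model)),
    "b1": jnp.zeros(ffn_dim),
    "b2": jnp.zeros(ffn_dim),
    "b_out": jnp.zeros(d_model),
}
x = jnp.ones((2, 4, d_model))
print(glu_ffn(params, x).shape)     # (2, 4, 8)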
dalle_mini.model.modeling/FFN.__call__
|
Modified
|
borisdayma~dalle-mini
|
361a994d86d90f758a35b8f94b5588e58b7a52e2
|
feat(model): allow bias (#152)
|
<13>:<add> use_bias=self.config.use_bias,
<del> use_bias=False,
<32>:<add> use_bias=self.config.use_bias,
<del> use_bias=False,
|
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<4> x = norm(
<5> self.config.ln_type,
<6> dtype=self.dtype,
<7> epsilon=1e-05,
<8> use_scale=self.config.force_ln_scale,
<9> )(x)
<10> x = nn.Dense(
<11> self.ffn_dim,
<12> dtype=self.dtype,
<13> use_bias=False,
<14> kernel_init=deepnet_init(gain)
<15> if self.config.use_deepnet_scaling
<16> else jax.nn.initializers.normal(self.config.init_std),
<17> )(x)
<18> x = ACT2FN[self.config.activation_function](x)
<19> if self.config.ln_positions in ["normformer"]:
<20> x = norm(
<21> self.config.ln_type,
<22> dtype=self.dtype,
<23> epsilon=1e-05,
<24> use_scale=self.config.force_ln_scale,
<25> )(x)
<26> x = nn.Dropout(rate=self.config.activation_dropout)(
<27> x, deterministic=deterministic
<28> )
<29> x = nn.Dense(
<30> self.embed_dim,
<31> dtype=self.dtype,
<32> use_bias=False,
<33> kernel_init=deepnet_init(gain)
<34> if self.config.use_deepnet_scaling
<35> else jax.nn.initializers.normal(self.config.init_std),
<36> )(x)
<37> if self.config.ln_positions in ["</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FFN
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
+ use_bias=self.config.use_bias,
- use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
+ use_bias=self.config.use_bias,
- use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
+ use_bias=self.config.use_bias,
- use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
# module: dalle_mini
+ __version__ = "0.0.4"
- __version__ = "0.0.3"
===========changed ref 3===========
<s>, # from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=False, # final layer normalization in encoder
use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
+ self.use_bias = use_bias
- self.use_head_scale = use_head_scale # per Normformer
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
if ln_positions == "deepnet":
ln_positions = "postln"
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
"postln",
"preln",
], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
+ self.use_head_scale = use_head_scale
assert use_alibi is False, "use_alibi is not supported yet"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
self.use_alibi = use_alibi
self.sinkhorn_iters = sinkhorn_iters
if ln_positions == "postln":
assert (
use_final_ln_encoder
), "use_final_ln_encoder must be True when ln_positions is 'postln'"
</s>
|
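Both FFN and GLU scale their kernels with deepnet_init(gain), where the gain comes from the deepnet_gain table quoted in the refs. A quick numeric check of those formulas for an illustrative 12-layer encoder and 12-layer decoder (alpha scales the residual branch, beta the initializer); the layer counts are made up for the example:

enc_layers, dec_layers = 12, 12

enc_alpha = 0.81 * (enc_layers**4 * dec_layers) ** 0.0625
enc_beta = 0.87 * (enc_layers**4 * dec_layers) ** -0.0625
dec_alpha = (3 * dec_layers) ** 0.25
dec_beta = (12 * dec_layers) ** -0.25

print(f"encoder: alpha={enc_alpha:.3f}, beta={enc_beta:.3f}")
print(f"decoder: alpha={dec_alpha:.3f}, beta={dec_beta:.3f}")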
dalle_mini.model.modeling/FlaxBartEncoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
361a994d86d90f758a35b8f94b5588e58b7a52e2
|
feat(model): allow bias (#152)
|
<20>:<add> bias=self.config.use_bias,
<del> bias=False,
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["encoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<9> hidden_states = norm(
<10> self.config.ln_type,
<11> dtype=self.dtype,
<12> epsilon=1e-05,
<13> use_scale=self.config.force_ln_scale,
<14> )(hidden_states)
<15> hidden_states, attn_weights = FlaxBartAttention(
<16> config=self.config,
<17> embed_dim=embed_dim,
<18> num_heads=self.config.encoder_attention_heads,
<19> dropout=self.config.attention_dropout,
<20> bias=False,
<21> dtype=self.dtype,
<22> is_encoder=True,
<23> )(hidden_states=hidden_states, attention_mask=attention_mask)
<24>
<25> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<26> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<27> hidden_states
<28> )
<29> hidden_states = nn.Dropout(rate=self.config.dropout)(
<30> hidden_states, deterministic=deterministic
<31> )
<32> hidden_states = residual * res_gain + hidden_states
<33> if self.config.ln_positions in ["postln</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["postln"]:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.force_ln_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartEncoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = True
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
+ use_bias=self.config.use_bias,
- use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = ACT2FN[self.config.activation_function](x)
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
+ use_bias=self.config.use_bias,
- use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "c</s>
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========changed ref 2===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
+ use_bias=self.config.use_bias,
- use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
+ use_bias=self.config.use_bias,
- use_bias=False,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense</s>
|
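The encoder layer above wires one of several LayerNorm placements around each sublayer plus a gained residual. A compact sketch of that wiring, reduced to a single generic sublayer and a bare (scale-free) layer norm; dropout and the attention/FFN internals are left out:

import jax.numpy as jnp

def layer_norm(x, eps=1e-5):
    mean = x.mean(-1, keepdims=True)
    var = x.var(-1, keepdims=True)
    return (x - mean) / jnp.sqrt(var + eps)

def residual_block(x, sublayer, ln_positions="preln", res_gain=1.0):
    residual = x
    if ln_positions in ("normformer", "cogview", "preln"):
        x = layer_norm(x)                 # normalize the sublayer input
    x = sublayer(x)
    if ln_positions in ("normformer", "swinv2", "cogview"):
        x = layer_norm(x)                 # normalize the sublayer output
    x = residual * res_gain + x           # res_gain > 1 under DeepNet scaling
    if ln_positions == "postln":
        x = layer_norm(x)                 # classic post-LN
    return x

x = jnp.arange(2 * 4 * 8, dtype=jnp.float32).reshape(2, 4, 8)
y = residual_block(x, sublayer=lambda h: 0.5 * h, ln_positions="normformer")
print(y.shape)  # (2, 4, 8)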
dalle_mini.model.modeling/FlaxBartDecoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
361a994d86d90f758a35b8f94b5588e58b7a52e2
|
feat(model): allow bias (#152)
|
<23>:<add> bias=self.config.use_bias,
<del> bias=False,
|
<s> FlaxBartDecoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["decoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8>
<9> # Self Attention
<10> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<11> hidden_states = norm(
<12> self.config.ln_type,
<13> dtype=self.dtype,
<14> epsilon=1e-05,
<15> use_scale=self.config.force_ln_scale,
<16> )(hidden_states)
<17> hidden_states, attn_weights = FlaxBartAttention(
<18> config=self.config,
<19> embed_dim=embed_dim,
<20> num_heads=self.config.decoder_attention_heads,
<21> dropout=self.config.attention_dropout,
<22> causal=True,
<23> bias=False,
<24> dtype=self.dtype,
<25> is_encoder=False,
<26> )(
<27> hidden_states=hidden_states,
<28> attention_mask=attention_mask,
<29> init_cache=init_cache,
<30> )
<31>
<32> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<33> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<34> hidden_states</s>
|
===========below chunk 0===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
# Cross Attention
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
hidden_states, cross_attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
bias=False,
dtype=self.dtype,
is_encoder=False,
)(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states =</s>
===========below chunk 1===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
# Feed forward
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain</s>
===========below chunk 2===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>_states
if self.add_norm or self.config.ln_positions in ["postln"]:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.force_ln_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights, cross_attn_weights)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartDecoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = False
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
|
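The decoder layer's self-attention runs with causal=True, which ultimately comes down to masking the upper triangle of the attention logits. A self-contained illustration of that mask, independent of the caching and combine_masks machinery above:

import jax
import jax.numpy as jnp

seq_len = 5
causal_mask = jnp.tril(jnp.ones((seq_len, seq_len), dtype=bool))  # True = may attend

logits = jax.random.normal(jax.random.PRNGKey(0), (seq_len, seq_len))
big_neg = jnp.finfo(logits.dtype).min
weights = jax.nn.softmax(jnp.where(causal_mask, logits, big_neg), axis=-1)

print(weights[0])                       # first token attends only to itself
print(jnp.allclose(weights.sum(-1), 1.0))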
dalle_mini.model.modeling/dot_product_attention_weights
|
Modified
|
borisdayma~dalle-mini
|
eed4896bca4400b4740117d2bfed6c018aa189b4
|
fix: sinkformer gradient
|
<s>
def dot_product_attention_weights(
query: Any,
key: Any,
bias: Optional[Any] = None,
mask: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
):
<0> """
<1> Computes dot-product attention weights given query and key.
<2>
<3> Adapted from flax.linen.attention.dot_product_attention_weights
<4> """
<5> assert query.ndim == key.ndim, "q, k must have same rank."
<6> assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
<7> assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
<8> assert query.shape[-1] == key.shape[-1], "q, k depths must match."
<9>
<10> # calculate attention matrix
<11> depth = query.shape[-1]
<12> query = query / jnp.sqrt(depth).astype(dtype)
<13> # attn weight shape is (batch..., num_heads, q_length, kv_length)
<14> attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
<15>
<16> # apply attention bias: masking, dropout, proximity bias, etc.
<17> if bias is not None:
<18> attn_weights = attn_weights + bias
<19> # apply attention mask
<20> if mask is not None:
<21> big_neg = jnp.finfo(dtype).min
<22> attn_weights = jnp.where(mask, attn_weights, big_neg)
<23>
<24> # normalize the attention weights
<25> attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
</s>
|
===========below chunk 0===========
<s>_attention_weights(
query: Any,
key: Any,
bias: Optional[Any] = None,
mask: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
):
# offset: 1
axis = -2 if i % 2 == 0 else -1
attn_weights /= 1e-8 + jnp.sum(attn_weights, axis=axis, keepdims=True)
# apply attention dropout
if not deterministic and dropout_rate > 0.0:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across the batch + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
)
attn_weights = attn_weights * multiplier
return attn_weights
|
|
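The record above fixes the gradient of the SinkFormer-style normalization. The underlying idea, stripped of the mask handling and the stop_gradient on the normalizer that the commit adds, is to renormalize the softmax output along alternating axes so it approaches a doubly-stochastic matrix:

import jax
import jax.numpy as jnp

def sinkhorn_attention(logits, n_iters=5):
    attn = jax.nn.softmax(logits, axis=-1)       # rows sum to 1 after softmax
    for i in range(n_iters):
        axis = -2 if i % 2 == 0 else -1          # alternate column / row steps
        attn = attn / (1e-8 + attn.sum(axis=axis, keepdims=True))
    return attn

attn = sinkhorn_attention(jax.random.normal(jax.random.PRNGKey(0), (6, 6)))
print(attn.sum(axis=-1))   # rows    ~1
print(attn.sum(axis=-2))   # columns ~1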
dalle_mini.model.modeling/FlaxBartAttention.__call__
|
Modified
|
borisdayma~dalle-mini
|
eed4896bca4400b4740117d2bfed6c018aa189b4
|
fix: sinkformer gradient
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> """Input shape: Batch x Time x Channel"""
<1>
<2> # if key_value_states are provided this layer is used as a cross-attention layer
<3> # for the decoder
<4> is_cross_attention = key_value_states is not None
<5> batch_size = hidden_states.shape[0]
<6>
<7> # get query proj
<8> query_states = self.q_proj(hidden_states)
<9> # get key, value proj
<10> if is_cross_attention:
<11> # cross_attentions
<12> key_states = self.k_proj(key_value_states)
<13> value_states = self.v_proj(key_value_states)
<14> else:
<15> # self_attention
<16> key_states = self.k_proj(hidden_states)
<17> value_states = self.v_proj(hidden_states)
<18>
<19> query_states = self._split_heads(query_states)
<20> key_states = self._split_heads(key_states)
<21> value_states = self._split_heads(value_states)
<22>
<23> # handle cache prepare causal attention mask
<24> if self.causal:
<25> query_length, key_length = query_states.shape[1], key_states.shape[1]
<26> if self.has_variable("cache", "cached_key"):
<27> mask_shift = self.variables["cache"]["cache_index"]
<28> max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
<29> causal_mask = lax.dynamic_slice(
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> # Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
sinkhorn_iters=self.config</s>
===========below chunk 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>horn_iters,
)
if self.config.use_cosine_attention:
# divide by tau
attn_weights = attn_weights / jnp.maximum(self.tau, 0.01)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
if self.config.use_head_scale:
# per Normformer
attn_output = attn_output * self.head_scale
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
===========unchanged ref 0===========
at: dalle_mini.model.modeling
dot_product_attention_weights(query: Any, key: Any, bias: Optional[Any]=None, mask: Optional[Any]=None, broadcast_dropout: bool=True, dropout_rng: Optional[PRNGKey]=None, dropout_rate: float=0.0, deterministic: bool=False, dtype: Any=jnp.float32, precision: PrecisionLike=None, sinkhorn_iters: int=1)
at: dalle_mini.model.modeling.FlaxBartAttention.setup
self.q_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
_split_heads(hidden_states)
_concatenate_to_cache(key, value, query, attention_mask)
===========unchanged ref 1===========
__call__(self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray]=None, attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True) -> Tuple[jnp.ndarray]
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
|
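For reference, a sketch of the cosine-attention variant visible in this attention module: queries and keys are L2-normalized so the logits become cosine similarities, then a clamped temperature is applied. The sketch folds the temperature into the logits (the usual Swin-v2 formulation), whereas the module above divides the weights returned by dot_product_attention_weights by its learned tau; the tau value and shapes here are illustrative:

import jax
import jax.numpy as jnp

def cosine_attention_weights(q, k, tau=0.1):
    q = q / (jnp.linalg.norm(q, axis=-1, keepdims=True) + 1e-8)
    k = k / (jnp.linalg.norm(k, axis=-1, keepdims=True) + 1e-8)
    logits = jnp.einsum("...qd,...kd->...qk", q, k)      # cosine similarities in [-1, 1]
    logits = logits / jnp.maximum(tau, 0.01)             # temperature, clamped like tau above
    return jax.nn.softmax(logits, axis=-1)

q = jax.random.normal(jax.random.PRNGKey(0), (2, 4, 16))
k = jax.random.normal(jax.random.PRNGKey(1), (2, 6, 16))
print(cosine_attention_weights(q, k).shape)   # (2, 4, 6)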
|
dalle_mini.model.modeling/dot_product_attention_weights
|
Modified
|
borisdayma~dalle-mini
|
00d46610722aa97e6bdd505fdc237c4a8dea6c58
|
feat: sinkhorn in lse mode (#155)
|
<2>:<add> mask is included into the bias.
<19>:<del> # apply attention mask
<20>:<del> if mask is not None:
<21>:<del> big_neg = jnp.finfo(dtype).min
<22>:<del> attn_weights = jnp.where(mask, attn_weights, big_neg)
<25>:<add> if causal or sinkhorn_iters == 1:
<add> # sinkhorn does not work for causal (leaks info of future tokens into past)
<add> attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
<del> attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
|
<s> query: Any,
key: Any,
bias: Optional[Any] = None,
mask: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
+ causal: bool = False,
):
<0> """
<1> Computes dot-product attention weights given query and key.
<2>
<3> Adapted from flax.linen.attention.dot_product_attention_weights"
<4> Adapted from flax.linen.attention.dot_product_attention_weights
<4> """
<5> assert query.ndim == key.ndim, "q, k must have same rank."
<6> assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
<7> assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
<8> assert query.shape[-1] == key.shape[-1], "q, k depths must match."
<9>
<10> # calculate attention matrix
<11> depth = query.shape[-1]
<12> query = query / jnp.sqrt(depth).astype(dtype)
<13> # attn weight shape is (batch..., num_heads, q_length, kv_length)
<14> attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
<15>
<16> # apply attention bias: masking, dropout, proximity bias, etc.
<17> if bias is not None:
<18> attn_weights = attn_weights + bias
<19> # apply attention mask
<20> if mask is not None:
<21> big_neg = jnp.finfo(dtype).min
<22> attn_weights = jnp.where(mask, attn_weights, big_neg)
<23>
<24> # normalize the attention weights
<25> attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
</s>
|
===========below chunk 0===========
<s>
key: Any,
bias: Optional[Any] = None,
mask: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
+ causal: bool = False,
):
# offset: 1
# TODO: this is unstable, requires lse space
axis = -2 if i % 2 == 0 else -1
if mask is not None:
attn_weights = jnp.where(
mask > 0,
attn_weights
/ (
1e-5
+ jax.lax.stop_gradient(
jnp.sum(attn_weights, axis=axis, where=mask, keepdims=True)
)
),
0.0,
)
else:
attn_weights = attn_weights / (
1e-5
+ jax.lax.stop_gradient(jnp.sum(attn_weights, axis=axis, keepdims=True))
)
# apply attention dropout
if not deterministic and dropout_rate > 0.0:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across the batch + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
)
attn_weights = attn_weights * multiplier
return attn_weights
|
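The "lse mode" change above moves the Sinkhorn iterations into log space: subtract a logsumexp along alternating axes and exponentiate once at the end, which avoids the unstable divisions flagged in the earlier TODO. A mask-free sketch:

import jax
import jax.numpy as jnp

def sinkhorn_lse(logits, n_iters=5):
    for i in range(n_iters):
        axis = -1 if i % 2 == 0 else -2
        logits = logits - jax.nn.logsumexp(logits, axis=axis, keepdims=True)
    return jnp.exp(logits)

attn = sinkhorn_lse(jax.random.normal(jax.random.PRNGKey(0), (6, 6)))
print(attn.sum(axis=-1))   # rows    ~1
print(attn.sum(axis=-2))   # columns ~1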
dalle_mini.model.modeling/FlaxBartAttention.__call__
|
Modified
|
borisdayma~dalle-mini
|
00d46610722aa97e6bdd505fdc237c4a8dea6c58
|
feat: sinkhorn in lse mode (#155)
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> """Input shape: Batch x Time x Channel"""
<1>
<2> # if key_value_states are provided this layer is used as a cross-attention layer
<3> # for the decoder
<4> is_cross_attention = key_value_states is not None
<5> batch_size = hidden_states.shape[0]
<6>
<7> # get query proj
<8> query_states = self.q_proj(hidden_states)
<9> # get key, value proj
<10> if is_cross_attention:
<11> # cross_attentions
<12> key_states = self.k_proj(key_value_states)
<13> value_states = self.v_proj(key_value_states)
<14> else:
<15> # self_attention
<16> key_states = self.k_proj(hidden_states)
<17> value_states = self.v_proj(hidden_states)
<18>
<19> query_states = self._split_heads(query_states)
<20> key_states = self._split_heads(key_states)
<21> value_states = self._split_heads(value_states)
<22>
<23> # handle cache prepare causal attention mask
<24> if self.causal:
<25> query_length, key_length = query_states.shape[1], key_states.shape[1]
<26> if self.has_variable("cache", "cached_key"):
<27> mask_shift = self.variables["cache"]["cache_index"]
<28> max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
<29> causal_mask = lax.dynamic_slice(
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> # Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
mask=attention_mask,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
sink</s>
===========below chunk 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>_iters=self.config.sinkhorn_iters,
)
if self.config.use_cosine_attention:
# divide by tau
attn_weights = attn_weights / jnp.maximum(self.tau, 0.01)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
if self.config.use_head_scale:
# per Normformer
attn_output = attn_output * self.head_scale
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
===========unchanged ref 0===========
at: dalle_mini.model.modeling
dot_product_attention_weights(query: Any, key: Any, bias: Optional[Any]=None, mask: Optional[Any]=None, broadcast_dropout: bool=True, dropout_rng: Optional[PRNGKey]=None, dropout_rate: float=0.0, deterministic: bool=False, dtype: Any=jnp.float32, precision: PrecisionLike=None, sinkhorn_iters: int=1, causal: bool=False)
at: dalle_mini.model.modeling.FlaxBartAttention.setup
self.q_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
at: dalle_mini.model.modeling.GLU
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========unchanged ref 1===========
_split_heads(hidden_states)
_concatenate_to_cache(key, value, query, attention_mask)
|
|
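The head_scale parameter referenced in this attention module (the NormFormer-style per-head scale, created with shape (1, 1, num_heads, 1) in setup) simply rescales each head's output before the heads are merged. A shape-only illustration with an arbitrary scale value:

import jax.numpy as jnp

batch, seq, heads, head_dim = 2, 4, 8, 16
attn_output = jnp.ones((batch, seq, heads, head_dim))
head_scale = jnp.full((1, 1, heads, 1), 0.5)     # learned per-head scalar in the model

scaled = attn_output * head_scale                # broadcasts over batch, seq and head_dim
merged = scaled.reshape(batch, seq, heads * head_dim)
print(merged.shape)                              # (2, 4, 128)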
dalle_mini.model.partitions/_get_partition_rules
|
Modified
|
borisdayma~dalle-mini
|
769d20ac4c8fa675067364e61a9a1c6898b7ba91
|
feat: allow relative position (#156)
|
<4>:<add> (("rel_bias", "embedding"), P(None, "mp")),
|
# module: dalle_mini.model.partitions
def _get_partition_rules():
<0> return [
<1> # embeddings
<2> (("embed_positions", "embedding"), P("mp", None)),
<3> (("embed_tokens", "embedding"), P("mp", None)),
<4> # attention
<5> (("(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
<6> (("out_proj", "kernel"), P("mp", None)),
<7> # FFN
<8> (("Dense_0", "kernel"), P(None, "mp")),
<9> (("GLU.*", "Dense_1", "kernel"), P(None, "mp")),
<10> (("GLU.*", "Dense_2", "kernel"), P("mp", None)),
<11> (("FFN.*", "Dense_1", "kernel"), P("mp", None)),
<12> # layer norms
<13> (("(bias|scale)",), None),
<14> (("lm_head", "kernel"), P(None, "mp")),
<15> # head scale and tau
<16> (("(head_scale|tau)",), None),
<17> ]
<18>
| |
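Rules like the ones above are typically applied by matching each regex tuple against a window of a parameter's key path and taking the spec of the first rule that matches. The helper below is an illustrative matcher over a subset of the rules, not the project's traversal code, and imports PartitionSpec from jax.sharding (the project may import it from a different jax module depending on version):

import re
from jax.sharding import PartitionSpec as P

rules = [
    (("embed_tokens", "embedding"), P("mp", None)),
    (("(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
    (("(bias|scale)",), None),
]

def spec_for(path, rules):
    # return the spec of the first rule whose regex tuple matches a
    # contiguous window of the parameter's key path
    for patterns, spec in rules:
        n = len(patterns)
        for i in range(len(path) - n + 1):
            if all(re.fullmatch(p, k) for p, k in zip(patterns, path[i : i + n])):
                return spec
    raise ValueError(f"no rule matches {path}")

print(spec_for(("decoder", "layers", "0", "q_proj", "kernel"), rules))   # PartitionSpec(None, 'mp')
print(spec_for(("encoder", "layernorm", "scale"), rules))                # None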
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
769d20ac4c8fa675067364e61a9a1c6898b7ba91
|
feat: allow relative position (#156)
|
<24>:<add> self.use_absolute_position_embeddings = use_absolute_position_embeddings
<add> self.use_swin_position_embeddings = use_swin_position_embeddings
|
<s>ers
+ use_final_ln_encoder=True, # final layer normalization in encoder
- use_final_ln_encoder=False, # final layer normalization in encoder
+ use_final_ln_decoder=True, # final layer normalization in decoder
- use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
<0> # text normalizer
<1> self.normalize_text = normalize_text
<2>
<3> # transformer variants
<4> self.use_bias = use_bias
<5> assert ln_type in [
<6> "rmsnorm",
<7> "layernorm",
<8> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<9> self.ln_type = ln_type
<10> if ln_positions == "deepnet":
<11> ln_positions = "postln"
<12> assert ln_positions in [
<13> "normformer",
<14> "swinv2",
<15> "cogview",
<16> "postln",
<17> "preln",
<18> ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
<19> self.use_head_scale = use_head_scale
<20> assert use_alibi is False, "use_alibi is not supported yet"
<21> self.ln_positions = ln_positions
<22> self.use_cosine_attention = use_cosine_attention
<23> self.tau_init = tau_init
<24> self.use_deepnet_scaling = use_deepnet_scaling
<25> self.use_glu = use_glu
<26> self.use_alibi = use_alibi
<27> self.sinkhorn_iters = sinkhorn_iters
<28> if ln_positions == "postln":
<29> assert (
<30> use_final_ln_encoder
<31> ), "use_final_ln_encoder must be True when</s>
|
===========below chunk 0===========
<s>_final_ln_encoder=True, # final layer normalization in encoder
- use_final_ln_encoder=False, # final layer normalization in encoder
+ use_final_ln_decoder=True, # final layer normalization in decoder
- use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 1
assert (
use_final_ln_decoder
), "use_final_ln_decoder must be True when ln_positions is 'postln'"
self.use_final_ln_encoder = use_final_ln_encoder
self.use_final_ln_decoder = use_final_ln_decoder
self.force_ln_scale = force_ln_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("</s>
===========below chunk 1===========
<s>_final_ln_encoder=True, # final layer normalization in encoder
- use_final_ln_encoder=False, # final layer normalization in encoder
+ use_final_ln_decoder=True, # final layer normalization in decoder
- use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 2
<s> )
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure</s>
===========below chunk 2===========
<s>_final_ln_encoder=True, # final layer normalization in encoder
- use_final_ln_encoder=False, # final layer normalization in encoder
+ use_final_ln_decoder=True, # final layer normalization in decoder
- use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 3
<s> for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
===========changed ref 0===========
# module: dalle_mini.model.partitions
def _get_partition_rules():
return [
# embeddings
(("embed_positions", "embedding"), P("mp", None)),
(("embed_tokens", "embedding"), P("mp", None)),
+ (("rel_bias", "embedding"), P(None, "mp")),
# attention
(("(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
(("out_proj", "kernel"), P("mp", None)),
# FFN
(("Dense_0", "kernel"), P(None, "mp")),
(("GLU.*", "Dense_1", "kernel"), P(None, "mp")),
(("GLU.*", "Dense_2", "kernel"), P("mp", None)),
(("FFN.*", "Dense_1", "kernel"), P("mp", None)),
# layer norms
(("(bias|scale)",), None),
(("lm_head", "kernel"), P(None, "mp")),
# head scale and tau
(("(head_scale|tau)",), None),
]
|
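A hedged usage sketch for the configuration changes above: the two new position-embedding switches and the final-LN flags passed alongside a few existing transformer-variant options. The keyword names come from the diff; every other argument is assumed to keep its default, which may differ from what a given checkpoint uses:

from dalle_mini.model.configuration import DalleBartConfig

config = DalleBartConfig(
    ln_type="layernorm",
    ln_positions="preln",
    use_bias=False,
    use_absolute_position_embeddings=True,    # classic learned positions
    use_swin_position_embeddings=False,       # Swin-style relative bias off
    use_final_ln_encoder=True,
    use_final_ln_decoder=True,
)
print(config.use_swin_position_embeddings)    # False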
dalle_mini.model.modeling/dot_product_attention_weights
|
Modified
|
borisdayma~dalle-mini
|
769d20ac4c8fa675067364e61a9a1c6898b7ba91
|
feat: allow relative position (#156)
|
<20>:<add>
<add> # add relative position
<add> if embed_pos is not None:
<add> attn_weights = attn_weights + embed_pos
|
<s> bias: Optional[Any] = None,
mask: Optional[Any] = None,
+ embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
causal: bool = False,
):
<0> """
<1> Computes dot-product attention weights given query and key.
<2> mask is included into the bias.
<3>
<4> Adapted from flax.linen.attention.dot_product_attention_weights"
<5> Adapted from flax.linen.attention.dot_product_attention_weights
<5> """
<6> assert query.ndim == key.ndim, "q, k must have same rank."
<7> assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
<8> assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
<9> assert query.shape[-1] == key.shape[-1], "q, k depths must match."
<10>
<11> # calculate attention matrix
<12> depth = query.shape[-1]
<13> query = query / jnp.sqrt(depth).astype(dtype)
<14> # attn weight shape is (batch..., num_heads, q_length, kv_length)
<15> attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
<16>
<17> # apply attention bias: masking, dropout, proximity bias, etc.
<18> if bias is not None:
<19> attn_weights = attn_weights + bias
<20>
<21> # normalize the attention weights
<22> if causal or sinkhorn_iters == 1:
<23> # sinkhorn does not work for causal (leaks info of future tokens into past)
<24> attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
<25> else:
</s>
|
===========below chunk 0===========
<s>] = None,
mask: Optional[Any] = None,
+ embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
causal: bool = False,
):
# offset: 1
for i in range(sinkhorn_iters):
# when causal, some attn_weights have been set to -inf through bias
if i % 2 == 0:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-1, keepdims=True)
else:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-2, keepdims=True)
if mask is not None:
attn_weights = jnp.where(mask, attn_weights, -jnp.inf)
attn_weights = jnp.exp(attn_weights).astype(dtype)
# apply attention dropout
if not deterministic and dropout_rate > 0.0:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across the batch + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
)
attn_weights = attn_weights * multiplier
return attn_weights
===========changed ref 0===========
# module: dalle_mini.model.partitions
def _get_partition_rules():
return [
# embeddings
(("embed_positions", "embedding"), P("mp", None)),
(("embed_tokens", "embedding"), P("mp", None)),
+ (("rel_bias", "embedding"), P(None, "mp")),
# attention
(("(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
(("out_proj", "kernel"), P("mp", None)),
# FFN
(("Dense_0", "kernel"), P(None, "mp")),
(("GLU.*", "Dense_1", "kernel"), P(None, "mp")),
(("GLU.*", "Dense_2", "kernel"), P("mp", None)),
(("FFN.*", "Dense_1", "kernel"), P("mp", None)),
# layer norms
(("(bias|scale)",), None),
(("lm_head", "kernel"), P(None, "mp")),
# head scale and tau
(("(head_scale|tau)",), None),
]
===========changed ref 1===========
<s>ers
+ use_final_ln_encoder=True, # final layer normalization in encoder
- use_final_ln_encoder=False, # final layer normalization in encoder
+ use_final_ln_decoder=True, # final layer normalization in decoder
- use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
self.use_bias = use_bias
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
if ln_positions == "deepnet":
ln_positions = "postln"
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
"postln",
"preln",
], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
self.use_head_scale = use_head_scale
assert use_alibi is False, "use_alibi is not supported yet"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
+ self.use_absolute_position_embeddings = use_absolute_position_embeddings
+ self.use_swin_position_embeddings = use_swin_position_embeddings
self.use_deepnet_scaling = use_deepnet_scaling
self.use_glu = use_glu
self.use_alibi = use_alibi
self.sinkhorn_iters = sinkhorn_iters
if ln_positions == "postln":
assert (
use_final_ln_encoder
</s>
===========changed ref 2===========
<s>_final_ln_encoder=True, # final layer normalization in encoder
- use_final_ln_encoder=False, # final layer normalization in encoder
+ use_final_ln_decoder=True, # final layer normalization in decoder
- use_final_ln_decoder=False, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 1
<s>horn_iters
if ln_positions == "postln":
assert (
use_final_ln_encoder
), "use_final_ln_encoder must be True when ln_positions is 'postln'"
assert (
use_final_ln_decoder
), "use_final_ln_decoder must be True when ln_positions is 'postln'"
self.use_final_ln_encoder = use_final_ln_encoder
self.use_final_ln_decoder = use_final_ln_decoder
self.force_ln_scale = force_ln_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init</s>
|
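The new embed_pos argument carries a per-head relative-position bias that is added to the attention logits before the softmax (backed by the rel_bias embedding the partition rules now shard). The model builds a Swin-style table over 2-D image positions; the sketch below uses plain 1-D offsets to keep it short:

import jax
import jax.numpy as jnp

seq_len, num_heads = 6, 4
# one bias per head and per relative offset in [-(L-1), L-1], learnable in the model
rel_bias_table = 0.02 * jax.random.normal(jax.random.PRNGKey(0), (2 * seq_len - 1, num_heads))

pos = jnp.arange(seq_len)
rel_index = pos[:, None] - pos[None, :] + seq_len - 1        # (q, k) -> table row
embed_pos = rel_bias_table[rel_index]                        # (L, L, heads)
embed_pos = jnp.transpose(embed_pos, (2, 0, 1))              # (heads, q, k)

logits = jax.random.normal(jax.random.PRNGKey(1), (num_heads, seq_len, seq_len))
weights = jax.nn.softmax(logits + embed_pos, axis=-1)
print(weights.shape)   # (4, 6, 6)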
dalle_mini.model.modeling/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
769d20ac4c8fa675067364e61a9a1c6898b7ba91
|
feat: allow relative position (#156)
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> if self.head_dim * self.num_heads != self.embed_dim:
<2> raise ValueError(
<3> f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
<4> f" and `num_heads`: {self.num_heads})."
<5> )
<6>
<7> dense = partial(
<8> nn.Dense,
<9> self.embed_dim,
<10> use_bias=self.bias,
<11> dtype=self.dtype,
<12> )
<13>
<14> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<15> self.config
<16> )
<17>
<18> self.q_proj = dense(
<19> kernel_init=deepnet_init()
<20> if self.config.use_deepnet_scaling
<21> else jax.nn.initializers.normal(self.config.init_std)
<22> )
<23> self.k_proj = dense(
<24> kernel_init=deepnet_init()
<25> if self.config.use_deepnet_scaling
<26> else jax.nn.initializers.normal(self.config.init_std)
<27> )
<28> self.v_proj = dense(
<29> kernel_init=deepnet_init(gain)
<30> if self.config.use_deepnet_scaling
<31> else jax.nn.initializers.normal(self.config.init_std)
<32> )
<33> self.out_proj = dense(
<34> kernel_init=deepnet_init(gain)
<35> if self.config.use_deepnet_scaling
<36> else jax.nn.initializers.normal(self.config.init_std)
<37> )
<38> self.dropout_layer = nn.Dropout(rate=self.dropout)
<39>
<40> if</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
# offset: 1
self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
)
if self.config.use_cosine_attention:
self.tau = self.param(
"tau",
jax.nn.initializers.constant(self.config.tau_init),
(1, self.num_heads, 1, 1),
)
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
at: dalle_mini.model.modeling.FlaxBartAttention
is_encoder: bool = False
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self) -> None
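A quick standalone check of the deepnet_gain formulas listed above; the layer counts here are made up, not a dalle-mini default:

    encoder_layers, decoder_layers = 12, 12  # hypothetical depths
    enc_alpha = 0.81 * (encoder_layers**4 * decoder_layers) ** 0.0625
    enc_beta = 0.87 * (encoder_layers**4 * decoder_layers) ** -0.0625
    dec_alpha = (3 * decoder_layers) ** 0.25
    dec_beta = (12 * decoder_layers) ** -0.25
    # alpha scales the residual branch, beta scales selected weight initializations
    print(enc_alpha, enc_beta, dec_alpha, dec_beta)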
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
- scale attention heads per NormFormer paper
"""
is_encoder: bool = False
+ q_length: int = None
+ k_length: int = None
===========changed ref 1===========
<s> bias: Optional[Any] = None,
mask: Optional[Any] = None,
+ embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
causal: bool = False,
):
"""
Computes dot-product attention weights given query and key.
mask is included into the bias.
Adapted from flax.linen.attention.dot_product_attention_weights"
"""
assert query.ndim == key.ndim, "q, k must have same rank."
assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
assert query.shape[-1] == key.shape[-1], "q, k depths must match."
# calculate attention matrix
depth = query.shape[-1]
query = query / jnp.sqrt(depth).astype(dtype)
# attn weight shape is (batch..., num_heads, q_length, kv_length)
attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
# apply attention bias: masking, dropout, proximity bias, etc.
if bias is not None:
attn_weights = attn_weights + bias
+
+ # add relative position
+ if embed_pos is not None:
+ attn_weights = attn_weights + embed_pos
# normalize the attention weights
if causal or sinkhorn_iters == 1:
# sinkhorn does not work for causal (leaks info of future tokens into past)
attn_weights = jax.nn.softmax(attn_weights).</s>
===========changed ref 2===========
<s>] = None,
mask: Optional[Any] = None,
+ embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
causal: bool = False,
):
# offset: 1
<s> (leaks info of future tokens into past)
attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
else:
# adapted from https://github.com/lucidrains/sinkhorn-transformer
for i in range(sinkhorn_iters):
# when causal, some attn_weights have been set to -inf through bias
if i % 2 == 0:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-1, keepdims=True)
else:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-2, keepdims=True)
if mask is not None:
attn_weights = jnp.where(mask, attn_weights, -jnp.inf)
attn_weights = jnp.exp(attn_weights).astype(dtype)
# apply attention dropout
if not deterministic and dropout_rate > 0.0:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across the batch + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = jax.random.bernoulli(dropout_rng</s>
===========changed ref 3===========
<s>] = None,
mask: Optional[Any] = None,
+ embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
causal: bool = False,
):
# offset: 2
<s>_prob, attn_weights.shape)
multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
)
attn_weights = attn_weights * multiplier
return attn_weights
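For reference, a minimal sketch of the causal mask built in setup() above, using flax's make_causal_mask on a dummy (1, image_length) input; the length is an arbitrary small value, not the real config:

    import jax.numpy as jnp
    from flax.linen import make_causal_mask

    image_length = 4  # illustrative only
    causal_mask = make_causal_mask(
        jnp.ones((1, image_length), dtype="bool"), dtype="bool"
    )
    print(causal_mask.shape)              # (1, 1, 4, 4)
    print(causal_mask[0, 0].astype(int))  # lower-triangular matrix of ones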
|
|
dalle_mini.model.modeling/FlaxBartAttention.__call__
|
Modified
|
borisdayma~dalle-mini
|
769d20ac4c8fa675067364e61a9a1c6898b7ba91
|
feat: allow relative position (#156)
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> """Input shape: Batch x Time x Channel"""
<1>
<2> # if key_value_states are provided this layer is used as a cross-attention layer
<3> # for the decoder
<4> is_cross_attention = key_value_states is not None
<5> batch_size = hidden_states.shape[0]
<6>
<7> # get query proj
<8> query_states = self.q_proj(hidden_states)
<9> # get key, value proj
<10> if is_cross_attention:
<11> # cross_attentions
<12> key_states = self.k_proj(key_value_states)
<13> value_states = self.v_proj(key_value_states)
<14> else:
<15> # self_attention
<16> key_states = self.k_proj(hidden_states)
<17> value_states = self.v_proj(hidden_states)
<18>
<19> query_states = self._split_heads(query_states)
<20> key_states = self._split_heads(key_states)
<21> value_states = self._split_heads(value_states)
<22>
<23> # handle cache prepare causal attention mask
<24> if self.causal:
<25> query_length, key_length = query_states.shape[1], key_states.shape[1]
<26> if self.has_variable("cache", "cached_key"):
<27> mask_shift = self.variables["cache"]["cache_index"]
<28> max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
<29> causal_mask = lax.dynamic_slice(
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> # Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -jnp.inf).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
mask=attention_mask,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
sink</s>
===========below chunk 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>_iters=self.config.sinkhorn_iters,
causal=self.causal,
)
if self.config.use_cosine_attention:
# divide by tau
attn_weights = attn_weights / jnp.maximum(self.tau, 0.01)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
if self.config.use_head_scale:
# per Normformer
attn_output = attn_output * self.head_scale
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
===========unchanged ref 0===========
at: dalle_mini.model.modeling
dot_product_attention_weights(query: Any, key: Any, bias: Optional[Any]=None, mask: Optional[Any]=None, broadcast_dropout: bool=True, dropout_rng: Optional[PRNGKey]=None, dropout_rate: float=0.0, deterministic: bool=False, dtype: Any=jnp.float32, precision: PrecisionLike=None, sinkhorn_iters: int=1, causal: bool=False)
at: dalle_mini.model.modeling.FlaxBartAttention.setup
self.q_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
dropout: float = 0.0
causal: bool = False
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
_split_heads(hidden_states)
_concatenate_to_cache(key, value, query, attention_mask)
===========unchanged ref 1===========
__call__(self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray]=None, attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True) -> Tuple[jnp.ndarray]
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
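A rough sketch of the cosine-attention path referenced above (use_cosine_attention): q and k are L2-normalized before the logits are computed, and the resulting weights are divided by a learned temperature tau clamped at 0.01. Shapes and values below are illustrative, not the model's:

    import jax
    import jax.numpy as jnp

    q = jax.random.normal(jax.random.PRNGKey(0), (1, 5, 2, 8))  # (batch, q_len, heads, dim)
    k = jax.random.normal(jax.random.PRNGKey(1), (1, 5, 2, 8))
    q = q / (jnp.linalg.norm(q, axis=-1, keepdims=True) + 1e-8)
    k = k / (jnp.linalg.norm(k, axis=-1, keepdims=True) + 1e-8)
    attn_weights = jax.nn.softmax(jnp.einsum("...qhd,...khd->...hqk", q, k))
    tau = jnp.full((1, 2, 1, 1), 0.1)  # stands in for the learned tau parameter
    attn_weights = attn_weights / jnp.maximum(tau, 0.01)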
|
|
dalle_mini.model.modeling/FlaxBartEncoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
769d20ac4c8fa675067364e61a9a1c6898b7ba91
|
feat: allow relative position (#156)
|
<23>:<add> q_length=self.config.max_text_length,
<add> k_length=self.config.max_text_length,
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["encoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<9> hidden_states = norm(
<10> self.config.ln_type,
<11> dtype=self.dtype,
<12> epsilon=1e-05,
<13> use_scale=self.config.force_ln_scale,
<14> )(hidden_states)
<15> hidden_states, attn_weights = FlaxBartAttention(
<16> config=self.config,
<17> embed_dim=embed_dim,
<18> num_heads=self.config.encoder_attention_heads,
<19> dropout=self.config.attention_dropout,
<20> bias=self.config.use_bias,
<21> dtype=self.dtype,
<22> is_encoder=True,
<23> )(hidden_states=hidden_states, attention_mask=attention_mask)
<24>
<25> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<26> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<27> hidden_states
<28> )
<29> hidden_states = nn.Dropout(rate=self.config.dropout)(
<30> hidden_states, deterministic=deterministic
<31> )
<32> hidden_states = residual * res_gain + hidden_states
<33> if self.config.ln</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["postln"]:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.force_ln_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartEncoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = True
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
- scale attention heads per NormFormer paper
"""
is_encoder: bool = False
+ q_length: int = None
+ k_length: int = None
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {self.num_heads})."
)
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=self.bias,
dtype=self.dtype,
)
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
self.q_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.out_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.config.use_head_scale:
self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1,</s>
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
# offset: 1
<s> self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
)
if self.config.use_cosine_attention:
self.tau = self.param(
"tau",
jax.nn.initializers.constant(self.config.tau_init),
(1, self.num_heads, 1, 1),
)
+ if self.config.use_swin_position_embeddings:
+ self.rel_bias = nn.Embed(
+ self.q_length,
+ self.k_length * self.num_heads,
+ embedding_init=deepnet_init()
+ if self.config.use_deepnet_scaling
+ else jax.nn.initializers.normal(self.config.init_std),
+ )
+
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
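A minimal sketch of the NormFormer-style head scaling set up above (use_head_scale): a learned (1, 1, num_heads, 1) parameter multiplies the per-head attention output before the heads are merged. Sizes below are arbitrary:

    import jax.numpy as jnp

    batch, q_len, heads, head_dim = 2, 5, 4, 8
    attn_output = jnp.ones((batch, q_len, heads, head_dim))
    head_scale = jnp.full((1, 1, heads, 1), 0.5)  # learned parameter in the model
    attn_output = attn_output * head_scale        # broadcasts over batch, length, dim
    merged = attn_output.reshape(batch, q_len, heads * head_dim)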
|
dalle_mini.model.modeling/FlaxBartDecoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
769d20ac4c8fa675067364e61a9a1c6898b7ba91
|
feat: allow relative position (#156)
|
<26>:<add> q_length=self.config.image_length,
<add> k_length=self.config.image_length,
|
<s> FlaxBartDecoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["decoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8>
<9> # Self Attention
<10> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<11> hidden_states = norm(
<12> self.config.ln_type,
<13> dtype=self.dtype,
<14> epsilon=1e-05,
<15> use_scale=self.config.force_ln_scale,
<16> )(hidden_states)
<17> hidden_states, attn_weights = FlaxBartAttention(
<18> config=self.config,
<19> embed_dim=embed_dim,
<20> num_heads=self.config.decoder_attention_heads,
<21> dropout=self.config.attention_dropout,
<22> causal=True,
<23> bias=self.config.use_bias,
<24> dtype=self.dtype,
<25> is_encoder=False,
<26> )(
<27> hidden_states=hidden_states,
<28> attention_mask=attention_mask,
<29> init_cache=init_cache,
<30> )
<31>
<32> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<33> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(</s>
|
===========below chunk 0===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
# Cross Attention
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
hidden_states, cross_attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
bias=self.config.use_bias,
dtype=self.dtype,
is_encoder=False,
)(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
if self.config.ln_positions in ["normformer", "swinv2", "cog</s>
===========below chunk 1===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s>attention_mask,
)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
# Feed forward
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
</s>
===========below chunk 2===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["postln"]:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.force_ln_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights, cross_attn_weights)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartDecoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = False
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
- scale attention heads per NormFormer paper
"""
is_encoder: bool = False
+ q_length: int = None
+ k_length: int = None
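The ln_positions branches in the decoder layer above reduce to the two classic residual patterns. A toy flax sketch, with a Dense layer standing in for the attention or FFN/GLU sublayer:

    import flax.linen as nn

    class PreLNBlock(nn.Module):
        @nn.compact
        def __call__(self, x):
            h = nn.LayerNorm()(x)         # "preln"/"normformer"/"cogview": LN before the sublayer
            h = nn.Dense(x.shape[-1])(h)  # placeholder sublayer
            return x + h

    class PostLNBlock(nn.Module):
        @nn.compact
        def __call__(self, x):
            h = nn.Dense(x.shape[-1])(x)  # placeholder sublayer
            return nn.LayerNorm()(x + h)  # "postln": LN after the residual add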
|
dalle_mini.model.modeling/FlaxBartEncoder.setup
|
Modified
|
borisdayma~dalle-mini
|
769d20ac4c8fa675067364e61a9a1c6898b7ba91
|
feat: allow relative position (#156)
|
<9>:<add> if self.config.use_absolute_position_embeddings:
<add> self.embed_positions = nn.Embed(
<del> self.embed_positions = nn.Embed(
<10>:<add> self.config.max_text_length + self.offset, # image length for BOS
<del> self.config.max_text_length + self.offset,
<11>:<add> embed_dim,
<del> embed_dim,
<12>:<add> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<del> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<13>:<add> )
<del> )
|
# module: dalle_mini.model.modeling
+ class FlaxBartEncoder(nn.Module):
- class FlaxBartEncoder(FlaxBartEncoder):
def setup(self):
<0> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<1>
<2> embed_dim = self.config.d_model
<3> self.padding_idx = self.config.pad_token_id
<4> self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
<5>
<6> # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
<7> # and adjust num_embeddings appropriately. Other models don't have this hack
<8> self.offset = 0
<9> self.embed_positions = nn.Embed(
<10> self.config.max_text_length + self.offset,
<11> embed_dim,
<12> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<13> )
<14> self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
<15> self.layernorm_embedding = norm(
<16> self.config.ln_type, dtype=self.dtype, epsilon=1e-05
<17> )
<18>
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling
norm(type, *args, **kwargs)
at: math
sqrt(x: SupportsFloat, /) -> float
at: transformers.configuration_utils.PretrainedConfig.__init__
self.pad_token_id = kwargs.pop("pad_token_id", None)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.dropout = dropout
self.init_std = init_std
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
at: transformers.models.bart.modeling_flax_bart.FlaxBartEncoder
config: BartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoder(nn.Module):
- class FlaxBartEncoder(FlaxBartEncoder):
+ config: DalleBartConfig
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- offset set to 0 (no padding token)
- use max_text_length instead of max_position_embeddings
- use custom FlaxBartEncoderLayerCollection
- embed_tokens cannot be None (issue at compile time)
"""
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
- scale attention heads per NormFormer paper
"""
is_encoder: bool = False
+ q_length: int = None
+ k_length: int = None
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
res_gain = (
deepnet_gain["encoder"]["alpha"](self.config)
if self.config.use_deepnet_scaling
else 1
)
embed_dim = self.config.d_model
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
hidden_states, attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.encoder_attention_heads,
dropout=self.config.attention_dropout,
bias=self.config.use_bias,
dtype=self.dtype,
is_encoder=True,
+ q_length=self.config.max_text_length,
+ k_length=self.config.max_text_length,
)(hidden_states=hidden_states, attention_mask=attention_mask)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln</s>
===========changed ref 3===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
<s>=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["postln"]:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.force_ln_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_</s>
===========changed ref 4===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s>
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
===========changed ref 5===========
# module: dalle_mini.model.partitions
def _get_partition_rules():
return [
# embeddings
(("embed_positions", "embedding"), P("mp", None)),
(("embed_tokens", "embedding"), P("mp", None)),
+ (("rel_bias", "embedding"), P(None, "mp")),
# attention
(("(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
(("out_proj", "kernel"), P("mp", None)),
# FFN
(("Dense_0", "kernel"), P(None, "mp")),
(("GLU.*", "Dense_1", "kernel"), P(None, "mp")),
(("GLU.*", "Dense_2", "kernel"), P("mp", None)),
(("FFN.*", "Dense_1", "kernel"), P("mp", None)),
# layer norms
(("(bias|scale)",), None),
(("lm_head", "kernel"), P(None, "mp")),
# head scale and tau
(("(head_scale|tau)",), None),
]
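The partition rules above pair tuples of regex fragments with PartitionSpecs. An illustrative sketch, not the project's actual helper, of how such a rule table is typically applied to a flattened parameter path; the jax.sharding.PartitionSpec import and the matching scheme are assumptions:

    import re
    from jax.sharding import PartitionSpec as P

    def first_matching_spec(path, rules):
        # path is a tuple of name components, e.g. ("encoder", "rel_bias", "embedding")
        joined = "/".join(path)
        for patterns, spec in rules:
            if re.search("/".join(patterns), joined):
                return spec
        return None  # unmatched parameters would stay replicated

    rules = [(("rel_bias", "embedding"), P(None, "mp"))]
    print(first_matching_spec(("encoder", "layers_0", "rel_bias", "embedding"), rules))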
|
dalle_mini.model.modeling/FlaxBartDecoder.setup
|
Modified
|
borisdayma~dalle-mini
|
769d20ac4c8fa675067364e61a9a1c6898b7ba91
|
feat: allow relative position (#156)
|
<11>:<add> if self.config.use_absolute_position_embeddings:
<add> self.embed_positions = nn.Embed(
<del> self.embed_positions = nn.Embed(
<12>:<add> self.config.image_length + self.offset, # image length for BOS
<del> self.config.image_length + self.offset, # image length for BOS
<13>:<add> embed_dim,
<del> embed_dim,
<14>:<add> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<del> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<15>:<add> )
<del> )
|
# module: dalle_mini.model.modeling
+ class FlaxBartDecoder(nn.Module):
- class FlaxBartDecoder(FlaxBartDecoder):
def setup(self):
<0> self.dropout_layer = nn.Dropout(rate=self.config.dropout)
<1>
<2> embed_dim = self.config.d_model
<3> self.padding_idx = self.config.pad_token_id
<4> self.embed_scale = (
<5> math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
<6> )
<7>
<8> # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
<9> # and adjust num_embeddings appropriately. Other models don't have this hack
<10> self.offset = 0
<11> self.embed_positions = nn.Embed(
<12> self.config.image_length + self.offset, # image length for BOS
<13> embed_dim,
<14> embedding_init=jax.nn.initializers.normal(self.config.init_std),
<15> )
<16>
<17> self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
<18> self.layernorm_embedding = norm(
<19> self.config.ln_type, dtype=self.dtype, epsilon=1e-05
<20> )
<21>
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling
norm(type, *args, **kwargs)
at: math
sqrt(x: SupportsFloat, /) -> float
at: transformers.configuration_utils.PretrainedConfig.__init__
self.pad_token_id = kwargs.pop("pad_token_id", None)
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.d_model = d_model
self.dropout = dropout
self.init_std = init_std
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
at: transformers.models.bart.modeling_flax_bart.FlaxBartDecoder
config: BartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self)
===========changed ref 0===========
# module: dalle_mini.model.modeling
+ class FlaxBartDecoder(nn.Module):
- class FlaxBartDecoder(FlaxBartDecoder):
+ config: DalleBartConfig
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- offset set to 0 (no padding token)
- use image_length instead of max_position_embeddings
- use custom FlaxBartDecoderLayerCollection
- embed_tokens cannot be None (issue at compile time)
"""
===========changed ref 1===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoder(nn.Module):
- class FlaxBartEncoder(FlaxBartEncoder):
+ config: DalleBartConfig
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
"""
Edits:
- offset set to 0 (no padding token)
- use max_text_length instead of max_position_embeddings
- use custom FlaxBartEncoderLayerCollection
- embed_tokens cannot be None (issue at compile time)
"""
===========changed ref 2===========
# module: dalle_mini.model.modeling
+ class FlaxBartEncoder(nn.Module):
- class FlaxBartEncoder(FlaxBartEncoder):
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
# Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 0
+ if self.config.use_absolute_position_embeddings:
+ self.embed_positions = nn.Embed(
- self.embed_positions = nn.Embed(
+ self.config.max_text_length + self.offset, # image length for BOS
- self.config.max_text_length + self.offset,
+ embed_dim,
- embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
- embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
- )
self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)
===========changed ref 3===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
- scale attention heads per NormFormer paper
"""
is_encoder: bool = False
+ q_length: int = None
+ k_length: int = None
===========changed ref 4===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
res_gain = (
deepnet_gain["encoder"]["alpha"](self.config)
if self.config.use_deepnet_scaling
else 1
)
embed_dim = self.config.d_model
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
hidden_states, attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.encoder_attention_heads,
dropout=self.config.attention_dropout,
bias=self.config.use_bias,
dtype=self.dtype,
is_encoder=True,
+ q_length=self.config.max_text_length,
+ k_length=self.config.max_text_length,
)(hidden_states=hidden_states, attention_mask=attention_mask)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln</s>
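A toy sketch of the use_absolute_position_embeddings switch above: when it is off the position table is simply never created; when on, an nn.Embed over image_length (or max_text_length) positions is added to the token embeddings. The module and sizes below are made up for illustration:

    import jax
    import jax.numpy as jnp
    import flax.linen as nn

    class TinyEmbed(nn.Module):
        use_absolute_position_embeddings: bool = True
        length: int = 16
        d_model: int = 8

        @nn.compact
        def __call__(self, input_ids):
            x = nn.Embed(256, self.d_model)(input_ids)
            if self.use_absolute_position_embeddings:
                positions = jnp.arange(input_ids.shape[-1])
                x = x + nn.Embed(self.length, self.d_model)(positions)
            return x

    params = TinyEmbed().init(jax.random.PRNGKey(0), jnp.zeros((1, 16), dtype=jnp.int32))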
|
dalle_mini.model.modeling/dot_product_attention_weights
|
Modified
|
borisdayma~dalle-mini
|
2c583b3c03dfc59ff9b394639290fb9d1ed9eea8
|
fix: sinkformer
|
<26>:<add> if not is_encoder or sinkhorn_iters == 1:
<del> if causal or sinkhorn_iters == 1:
|
<s> mask: Optional[Any] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
+ is_encoder: bool = False,
- causal: bool = False,
):
<0> """
<1> Computes dot-product attention weights given query and key.
<2> mask is included into the bias.
<3>
<4> Adapted from flax.linen.attention.dot_product_attention_weights"
<5> """
<6> assert query.ndim == key.ndim, "q, k must have same rank."
<7> assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
<8> assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
<9> assert query.shape[-1] == key.shape[-1], "q, k depths must match."
<10>
<11> # calculate attention matrix
<12> depth = query.shape[-1]
<13> query = query / jnp.sqrt(depth).astype(dtype)
<14> # attn weight shape is (batch..., num_heads, q_length, kv_length)
<15> attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
<16>
<17> # apply attention bias: masking, dropout, proximity bias, etc.
<18> if bias is not None:
<19> attn_weights = attn_weights + bias
<20>
<21> # add relative position
<22> if embed_pos is not None:
<23> attn_weights = attn_weights + embed_pos
<24>
<25> # normalize the attention weights
<26> if causal or sinkhorn_iters == 1:
<27> # sinkhorn does not work for causal (leaks info of</s>
|
===========below chunk 0===========
<s>] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
+ is_encoder: bool = False,
- causal: bool = False,
):
# offset: 1
attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
else:
# adapted from https://github.com/lucidrains/sinkhorn-transformer
for i in range(sinkhorn_iters):
# when causal, some attn_weights have been set to -inf through bias
if i % 2 == 0:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-1, keepdims=True)
else:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-2, keepdims=True)
if mask is not None:
attn_weights = jnp.where(mask, attn_weights, -jnp.inf)
attn_weights = jnp.exp(attn_weights).astype(dtype)
# apply attention dropout
if not deterministic and dropout_rate > 0.0:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across the batch + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
</s>
===========below chunk 1===========
<s>] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
+ is_encoder: bool = False,
- causal: bool = False,
):
# offset: 2
<s> = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
)
attn_weights = attn_weights * multiplier
return attn_weights
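A minimal standalone sketch of the Sinkhorn normalization used above in the non-causal (encoder) case: alternating row and column normalizations in log space drive the weights toward a doubly stochastic matrix. Matrix size and iteration count are arbitrary:

    import jax
    import jax.numpy as jnp

    logits = jax.random.normal(jax.random.PRNGKey(0), (4, 4))
    for i in range(20):
        if i % 2 == 0:
            logits -= jax.nn.logsumexp(logits, axis=-1, keepdims=True)  # normalize rows
        else:
            logits -= jax.nn.logsumexp(logits, axis=-2, keepdims=True)  # normalize columns
    attn = jnp.exp(logits)
    print(attn.sum(axis=-1), attn.sum(axis=-2))  # both close to 1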
|
dalle_mini.model.modeling/FlaxBartAttention.__call__
|
Modified
|
borisdayma~dalle-mini
|
2c583b3c03dfc59ff9b394639290fb9d1ed9eea8
|
fix: sinkformer
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> """Input shape: Batch x Time x Channel"""
<1>
<2> # if key_value_states are provided this layer is used as a cross-attention layer
<3> # for the decoder
<4> is_cross_attention = key_value_states is not None
<5> batch_size = hidden_states.shape[0]
<6>
<7> # get query proj
<8> query_states = self.q_proj(hidden_states)
<9> # get key, value proj
<10> if is_cross_attention:
<11> # cross_attentions
<12> key_states = self.k_proj(key_value_states)
<13> value_states = self.v_proj(key_value_states)
<14> else:
<15> # self_attention
<16> key_states = self.k_proj(hidden_states)
<17> value_states = self.v_proj(hidden_states)
<18>
<19> query_states = self._split_heads(query_states)
<20> key_states = self._split_heads(key_states)
<21> value_states = self._split_heads(value_states)
<22>
<23> # handle cache prepare causal attention mask
<24> if self.causal:
<25> query_length, key_length = query_states.shape[1], key_states.shape[1]
<26> if self.has_variable("cache", "cached_key"):
<27> mask_shift = self.variables["cache"]["cache_index"]
<28> max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
<29> causal_mask = lax.dynamic_slice(
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> # Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -jnp.inf).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
# relative position embeddings
if self.config.use_swin_position_embeddings:
position_ids = jnp.arange(self.q_length)
embed_pos = self.rel_bias(position_ids)
embed_pos = rearrange(embed_pos, "q (k h) -> 1 h q k", h=self.num_heads)
else:
</s>
===========below chunk 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>_pos = None
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
mask=attention_mask,
embed_pos=embed_pos,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
sinkhorn_iters=self.config.sinkhorn_iters,
causal=self.causal,
)
if self.config.use_cosine_attention:
# divide by tau
attn_weights = attn_weights / jnp.maximum(self.tau, 0.01)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
if self.config.use_head_scale:
# per Normformer
attn_output = attn_output * self.head_scale
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
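For the swin-style relative position path used above, the bias table is an nn.Embed of shape (q_length, k_length * num_heads) indexed by query position and reshaped to (1, heads, q, k) before being added to the logits. A shape-only sketch with made-up sizes:

    import jax.numpy as jnp
    from einops import rearrange

    q_length, k_length, num_heads = 4, 4, 2
    table = jnp.zeros((q_length, k_length * num_heads))  # stands in for rel_bias(position_ids)
    embed_pos = rearrange(table, "q (k h) -> 1 h q k", h=num_heads)
    assert embed_pos.shape == (1, num_heads, q_length, k_length)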
===========unchanged ref 0===========
at: dalle_mini.model.modeling
dot_product_attention_weights(query: Any, key: Any, bias: Optional[Any]=None, mask: Optional[Any]=None, embed_pos: Optional[Any]=None, broadcast_dropout: bool=True, dropout_rng: Optional[PRNGKey]=None, dropout_rate: float=0.0, deterministic: bool=False, dtype: Any=jnp.float32, precision: PrecisionLike=None, sinkhorn_iters: int=1, is_encoder: bool=False)
at: dalle_mini.model.modeling.FlaxBartAttention.setup
self.q_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
_split_heads(hidden_states)
|
|
tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices/materialize_matrix_from_concat
|
Modified
|
borisdayma~dalle-mini
|
9ecdd3fbced469d4cbe5ffdd8ff5eff3416ce402
|
feat: update shampoo
|
<6>:<add> If not specified, it is inferred from the shape of block_rows_concat.
<7>:<add> if num_blocks is None:
<add> num_blocks = find_num_blocks(block_rows_concat)
<add>
|
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
@functools.partial(jax.jit, static_argnames=("num_blocks"))
def materialize_matrix_from_concat(
block_rows_concat,
+ num_blocks=None,
- num_blocks,
):
<0> """Returns a materialized symmetric matrix from concatenated slices.
<1>
<2> Args:
<3> block_rows_concat: The matrix represented as the concatenated
<4> lower-triangular blocks.
<5> num_blocks: The number of block-rows used to represent the symmetric matrix.
<6> """
<7> block_size = block_rows_concat.shape[-2]
<8>
<9> block_rows = [
<10> block_rows_concat[
<11> Ellipsis,
<12> (k * (k + 1))
<13> // 2
<14> * block_size : (((k + 1) * (k + 2)) // 2 + 1)
<15> * block_size,
<16> ]
<17> for k in range(num_blocks)
<18> ]
<19>
<20> return materialize_matrix(SlicedSymmetricMatrix(block_rows=block_rows))
<21>
|
===========unchanged ref 0===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
block_rows = [
block_rows_concat[
Ellipsis,
(k * (k + 1))
// 2
* block_size : (((k + 1) * (k + 2)) // 2 + 1)
* block_size,
]
for k in range(num_blocks)
]
find_num_blocks(block_rows_concat)
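Worked numbers for the block-row bookkeeping above (sizes chosen to match the find_num_blocks docstring example): 4 block-rows of lower-triangular blocks hold 1 + 2 + 3 + 4 = 10 square blocks, so with block_size = 256 the concatenated form has shape (256, 2560).

    block_size, num_blocks = 256, 4
    total_blocks = num_blocks * (num_blocks + 1) // 2
    concat_shape = (block_size, total_blocks * block_size)
    print(total_blocks, concat_shape)  # 10 (256, 2560)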
|
tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices/find_num_blocks
|
Modified
|
borisdayma~dalle-mini
|
9ecdd3fbced469d4cbe5ffdd8ff5eff3416ce402
|
feat: update shampoo
|
<18>:<del> num_blocks = np.round((np.sqrt(8 * total_blocks + 1) - 1) / 2).astype(np.int32)
<19>:<del> if num_blocks * (num_blocks + 1) / 2 != total_blocks:
<20>:<del> raise ValueError(
<21>:<del> "Could not determine an appropriate number of blocks for "
<22>:<del> "the concatenated matrix."
<23>:<del> )
<24>:<del> else:
<25>:<del> return num_blocks
<26>:<add> return num_blocks_from_total_blocks(total_blocks)
|
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
def find_num_blocks(block_rows_concat):
<0> """Returns the number of (row) blocks representing the concatenated matrix.
<1>
<2> For example, an input with dimensions [256, 2560] represents 10 square blocks,
<3> which matches 4 lower-triangular block rows (1+2+3+4). So this function will
<4> return 4.
<5>
<6> Use ordinary numpy functions here so that the returned value is static.
<7>
<8> Args:
<9> block_rows_concat: The concatenated block array.
<10>
<11> Raises:
<12> ValueError: When the dimensions of the matrix do not correspond to a lower
<13> triangular block representation.
<14> """
<15> # Compute the number of square blocks used to represent the matrix.
<16> total_blocks = block_rows_concat.shape[-1] / block_rows_concat.shape[-2]
<17> # Determine the number of block rows by inverting y = x*(x+1)/2.
<18> num_blocks = np.round((np.sqrt(8 * total_blocks + 1) - 1) / 2).astype(np.int32)
<19> if num_blocks * (num_blocks + 1) / 2 != total_blocks:
<20> raise ValueError(
<21> "Could not determine an appropriate number of blocks for "
<22> "the concatenated matrix."
<23> )
<24> else:
<25> return num_blocks
<26>
|
===========unchanged ref 0===========
at: numpy
int32 = signedinteger[_32Bit]
sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None]
at: numpy.core.fromnumeric
around(a: _ArrayLikeBool_co, decimals: SupportsIndex=..., out: None=...) -> NDArray[float16]
around(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex=..., out: _ArrayType=...) -> _ArrayType
around(a: _BoolLike_co, decimals: SupportsIndex=..., out: None=...) -> float16
around(a: _ComplexLike_co | object_, decimals: SupportsIndex=..., out: None=...) -> Any
around(a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex=..., out: None=...) -> NDArray[Any]
around(a: _ArrayLike[_SCT_uifcO], decimals: SupportsIndex=..., out: None=...) -> NDArray[_SCT_uifcO]
around(a: _SCT_uifcO, decimals: SupportsIndex=..., out: None=...) -> _SCT_uifcO
at: numpy.generic
astype(dtype: _DTypeLike[_ScalarType], order: _OrderKACF=..., casting: _CastingKind=..., subok: bool=..., copy: bool | _CopyMode=...) -> _ScalarType
astype(dtype: DTypeLike, order: _OrderKACF=..., casting: _CastingKind=..., subok: bool=..., copy: bool | _CopyMode=...) -> Any
===========changed ref 0===========
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
@functools.partial(jax.jit, static_argnames=("num_blocks"))
def materialize_matrix_from_concat(
block_rows_concat,
+ num_blocks=None,
- num_blocks,
):
"""Returns a materialized symmetric matrix from concatenated slices.
Args:
block_rows_concat: The matrix represented as the concatenated
lower-triangular blocks.
num_blocks: The number of block-rows used to represent the symmetric matrix.
+ If not specified, it is inferred from the shape of block_rows_concat.
"""
+ if num_blocks is None:
+ num_blocks = find_num_blocks(block_rows_concat)
+
block_size = block_rows_concat.shape[-2]
block_rows = [
block_rows_concat[
Ellipsis,
(k * (k + 1))
// 2
* block_size : (((k + 1) * (k + 2)) // 2 + 1)
* block_size,
]
for k in range(num_blocks)
]
return materialize_matrix(SlicedSymmetricMatrix(block_rows=block_rows))
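A quick check of the inversion used by find_num_blocks, y = x*(x+1)/2 solved as x = (sqrt(8*y + 1) - 1) / 2:

    import numpy as np

    for num_blocks in range(1, 6):
        total = num_blocks * (num_blocks + 1) // 2
        recovered = int(np.round((np.sqrt(8 * total + 1) - 1) / 2))
        assert recovered == num_blocks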
|
tools.train.scalable_shampoo.distributed_shampoo/matrix_inverse_pth_root
|
Modified
|
borisdayma~dalle-mini
|
9ecdd3fbced469d4cbe5ffdd8ff5eff3416ce402
|
feat: update shampoo
|
<17>:<add> precision: precision XLA related flag, the available options are: a)
<del> precision: precision XLA related flag, the available options are:
<18>:<add> lax.Precision.DEFAULT (better step time, but not precise) b)
<del> a) lax.Precision.DEFAULT (better step time, but not precise)
<19>:<add> lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
<del> b) lax.Precision.HIGH (increased precision, slower)
<20>:<add> (best possible precision, slowest)
<del> c) lax.Precision.HIGHEST (best possible precision, slowest)
<25>:<add>
<add> # If the input is not square, materialize it from the concatenated form.
<add> if matrix.shape[0] != matrix.shape[1]:
<add> matrix = symmetric_matrices.materialize_matrix_from_concat(matrix)
|
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
<0> """Computes `matrix^(-1/p)`, where `p` is a positive integer.
<1>
<2> This function uses the coupled Newton iterations algorithm for
<3> the computation of a matrix's inverse pth root.
<4>
<5>
<6> References:
<7> [Functions of Matrices, Theory and Computation,
<8> Nicholas J Higham, Pg 184, Eq 7.18](
<9> https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
<10>
<11> Args:
<12> matrix: the symmetric PSD matrix whose power is to be computed
<13> p: exponent, for p a positive integer.
<14> num_iters: Maximum number of iterations.
<15> ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
<16> error_tolerance: Error indicator, useful for early termination.
<17> precision: precision XLA related flag, the available options are:
<18> a) lax.Precision.DEFAULT (better step time, but not precise)
<19> b) lax.Precision.HIGH (increased precision, slower)
<20> c) lax.Precision.HIGHEST (best possible precision, slowest)
<21>
<22> Returns:
<23> matrix^(-1/p)
<24> """
<25>
<26> assert matrix.shape[0] == matrix.shape[1]
<27>
<28> # We use _MAT_INV_PTH_ROOT_DTYPE for the matrix inverse pth root.
<29> # Switch to f64 if you have hardware that supports it. Enable the jax flag
<30> # jax_enable_x64 for this to work.
<31> matrix_size = matrix.shape[0]
<32> orig_dtype = matrix.dtype
<33> matrix = matrix</s>
|
===========below chunk 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 1
alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE)
identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE)
_, max_ev = power_iteration(
matrix=matrix, num_iters=100, error_tolerance=1e-6, precision=precision
)
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-6)
def _iter_condition(state):
(i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, run_step) = state
error_above_threshold = jnp.logical_and(error > error_tolerance, run_step)
return jnp.logical_and(i < num_iters, error_above_threshold)
def _iter_body(state):
(i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state
mat_m_i = (1 - alpha) * identity + alpha * mat_m
new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision)
new_error = jnp.max(jnp.abs(new_mat_m - identity))
# sometimes error increases after an iteration before decreasing and
# converging. 1.2 factor is used to bound the maximal allowed increase.
return (i + 1, new_mat_m, new_mat_h,</s>
===========below chunk 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 2
<s> is used to bound the maximal allowed increase.
return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, new_error < error * 1.2)
if matrix_size == 1:
resultant_mat_h = (matrix + ridge_epsilon) ** alpha
error = 0
else:
damped_matrix = matrix + ridge_epsilon * identity
z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix))
new_mat_m_0 = damped_matrix * z
new_error = jnp.max(jnp.abs(new_mat_m_0 - identity))
new_mat_h_0 = identity * jnp.power(z, 1.0 / p)
init_state = tuple([0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True])
_, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop(
_iter_condition, _iter_body, init_state
)
error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32)
is_converged = jnp.asarray(convergence, old_mat_h.dtype)
resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h
resultant_mat_h =
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.distributed_shampoo
_MAT_INV_PTH_ROOT_DTYPE = jnp.float64
power_iteration(matrix, num_iters=100, error_tolerance=1e-6, precision=lax.Precision.HIGHEST)
mat_power(mat_m, p, precision=lax.Precision.HIGHEST)
at: tools.train.scalable_shampoo.distributed_shampoo.mat_power
power = jnp.eye(mat_m.shape[0], dtype=_MAT_INV_PTH_ROOT_DTYPE)
_iter_condition(state)
_iter_body(state)
at: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
materialize_matrix_from_concat(block_rows_concat, num_blocks)
===========changed ref 0===========
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
@functools.partial(jax.jit, static_argnames=("num_blocks"))
def materialize_matrix_from_concat(
block_rows_concat,
+ num_blocks=None,
- num_blocks,
):
"""Returns a materialized symmetric matrix from concatenated slices.
Args:
block_rows_concat: The matrix represented as the concatenated
lower-triangular blocks.
num_blocks: The number of block-rows used to represent the symmetric matrix.
+ If not specified, it is inferred from the shape of block_rows_concat.
"""
+ if num_blocks is None:
+ num_blocks = find_num_blocks(block_rows_concat)
+
block_size = block_rows_concat.shape[-2]
block_rows = [
block_rows_concat[
Ellipsis,
(k * (k + 1))
// 2
* block_size : (((k + 1) * (k + 2)) // 2 + 1)
* block_size,
]
for k in range(num_blocks)
]
return materialize_matrix(SlicedSymmetricMatrix(block_rows=block_rows))
===========changed ref 1===========
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ @functools.partial(jax.jit, static_argnames=("block_size"))
+ def slice_symmetric_matrix_concat(
+ mat,
+ block_size,
+ ):
+ """Returns the concatenated sliced row blocks.
+
+ Args:
+ mat: A symmetric matrix.
+ block_size: The size of the row slices.
+ """
+ sliced_symmetric_matrix = slice_symmetric_matrix(mat=mat, block_size=block_size)
+ return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1)
+
===========changed ref 2===========
# module: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
+ def sliced_matrix_diag(mat):
+ """Returns the diagonal of the symmetric matrix.
+
+ Args:
+ mat: The symmetric matrix represented in concatenated block form.
+ """
+ rows, cols = mat.shape
+ total_blocks = cols // rows
+ num_blocks = num_blocks_from_total_blocks(total_blocks)
+ diags = []
+ for i in range(num_blocks):
+ last_index = rows * ((i + 2) * (i + 1)) // 2
+ first_index = last_index - rows
+ diags.append(jnp.diag(mat[Ellipsis, first_index:last_index]))
+ return jnp.concatenate(diags, axis=-1)
+
|
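The matrix_inverse_pth_root record above describes the coupled Newton iteration (Higham, Eq. 7.18) only through its docstring and a truncated body. A minimal standalone sketch of the same recurrence in plain NumPy, under simplifying assumptions: no ridge term, no damping of the error check, no early-exit bookkeeping, and the helper name inverse_pth_root_newton is made up for illustration.

import numpy as np

def inverse_pth_root_newton(matrix, p, num_iters=100, tol=1e-10):
    # Coupled iteration: with T_k = ((p + 1) * I - M_k) / p,
    #   M_{k+1} = T_k^p @ M_k   and   H_{k+1} = H_k @ T_k,
    # so M_k -> I and H_k -> matrix^(-1/p).
    n = matrix.shape[0]
    identity = np.eye(n)
    alpha = -1.0 / p
    z = (1 + p) / (2 * np.linalg.norm(matrix))  # same initial scaling as in the record
    m = matrix * z
    h = identity * z ** (1.0 / p)
    for _ in range(num_iters):
        t = (1 - alpha) * identity + alpha * m  # ((p + 1) * I - m) / p
        m = np.linalg.matrix_power(t, p) @ m
        h = h @ t
        if np.max(np.abs(m - identity)) < tol:
            break
    return h

a = np.diag([1.0, 4.0, 9.0])
print(inverse_pth_root_newton(a, 2))  # approximately diag(1, 1/2, 1/3), i.e. a^(-1/2)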
dalle_mini.model.text/TextNormalizer.__call__
|
Modified
|
borisdayma~dalle-mini
|
7ef7bd9239040e63d9e0ec1a8330e91584085235
|
feat(text): support emojis (#154)
|
<4>:<add> # decode emojis (would be removed by unidecode)
<add> t = emoji.demojize(t)
|
# module: dalle_mini.model.text
class TextNormalizer:
def __call__(self, t):
<0> # fix some characters
<1> t = ftfy.fix_text(t)
<2> # fix html
<3> t = fix_html(t)
<4> # decode and simplify text: see unidecode library
<5> t = unidecode(t)
<6> # lower case
<7> t = t.lower()
<8> # replace <PERSON> (for CC12M)
<9> t = replace_person_token(t)
<10> # remove wiki reference (for WIT)
<11> t = remove_wiki_ref(t)
<12> # remove html tags
<13> t = remove_html_tags(t)
<14> # remove urls
<15> t = remove_urls(t)
<16> # remove commas in numbers
<17> t = remove_comma_numbers(t)
<18> # handle dots in numbers and quotes - Part 1
<19> t = pre_process_dot_numbers(t)
<20> t = pre_process_quotes(t)
<21> t = pre_process_dates(t)
<22> # handle special characters
<23> t = handle_special_chars(t)
<24> # handle hashtags
<25> t = expand_hashtags(t, self._hashtag_processor)
<26> # ignore useless characters
<27> t = ignore_chars(t)
<28> # simplify quotes
<29> t = simplify_quotes(t)
<30> # all punctuation becomes commas
<31> t = replace_punctuation_with_commas(t)
<32> # handle dots in numbers and quotes - Part 2
<33> t = post_process_dot_numbers(t)
<34> t = post_process_quotes(t)
<35> t = post_process_dates(t)
<36> # handle repeating characters
<37> t = remove_repeating_chars(t)
<38> # merge quotes
<39> t = merge_quotes(t)
<40> # merge commas
<41> t = merge_commas(t)
<42> # remove multiple spaces
<43> t = remove_extra_spaces(t)</s>
|
===========below chunk 0===========
# module: dalle_mini.model.text
class TextNormalizer:
def __call__(self, t):
# offset: 1
t = remove_first_last_commas(t)
# always start with a space
return f" {t}"
===========unchanged ref 0===========
at: dalle_mini.model.text
replace_person_token(t)
fix_html(t)
replace_punctuation_with_commas(t)
simplify_quotes(t)
merge_quotes(t)
remove_comma_numbers(t)
pre_process_dot_numbers(t)
post_process_dot_numbers(t)
pre_process_quotes(t)
post_process_quotes(t)
pre_process_dates(t)
post_process_dates(t)
merge_commas(t)
handle_special_chars(t)
expand_hashtags(t, hashtag_processor)
ignore_chars(t)
remove_extra_spaces(t)
remove_repeating_chars(t)
remove_urls(t)
remove_html_tags(t)
remove_wiki_ref(t)
at: dalle_mini.model.text.TextNormalizer.__init__
self._hashtag_processor = HashtagProcessor()
|
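The emoji commit above exists because unidecode strips emoji before they can contribute any text, so demojize has to run first. A small illustration (the exact ":name:" token depends on the installed emoji package version):

from emoji import demojize
from unidecode import unidecode

caption = "sunset over the beach 🌅"
print(unidecode(caption))            # unidecode typically drops the emoji entirely
print(unidecode(demojize(caption)))  # keeps a textual hint such as ":sunrise:"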
tools.train.train/TrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
01996042a17f31230af032748a7583c5419dabcf
|
Merge pull request #162 from borisdayma/demo-improvements
|
<40>:<add> assert self.shard_shampoo_across in [
<add> "dp",
<add> "mp",
<add> "2d",
<add> ], f"Shard shampoo across {self.shard_shampoo_across} not supported."
|
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
<0> if self.assert_TPU_available:
<1> assert (
<2> jax.local_device_count() == 8
<3> ), "TPUs in use, please check running processes"
<4> if self.output_dir.startswith("gs://"):
<5> assert (
<6> storage is not None
<7> ), 'Could not find google.storage. Install with "pip install google-cloud-storage"'
<8> assert self.optim in [
<9> "distributed_shampoo",
<10> "adam",
<11> "adafactor",
<12> ], f"Selected optimizer not supported: {self.optim}"
<13> assert self.graft_type in [
<14> "rmsprop_normalized",
<15> "rmsprop",
<16> "adagrad",
<17> "adagrad_normalized",
<18> "sgd",
<19> "sqrt_n",
<20> ], f"Selected graft type not supported: {self.graft_type}"
<21> assert self.lr_decay in [
<22> None,
<23> "linear",
<24> "exponential",
<25> ], f"Selected learning rate decay not supported: {self.lr_decay}"
<26> if self.per_device_eval_batch_size is None:
<27> self.per_device_eval_batch_size = self.per_device_train_batch_size
<28> if self.log_norm_steps is True:
<29> self.log_norm_steps = self.logging_steps
<30> if (
<31> os.path.exists(self.output_dir)
<32> and os.listdir(self.output_dir)
<33> and self.do_train
<34> and not self.overwrite_output_dir
<35> ):
<36> raise ValueError(
<37> f"Output directory ({self.output_dir}) already exists and is not empty."
<38> "Use --overwrite_output_dir to overcome."
<39> )
<40> assert (
<41> self.mp_devices > 0
<42> )</s>
|
===========below chunk 0===========
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
# offset: 1
assert (
jax.device_count() % self.mp_devices == 0
), f"Number of available devices ({jax.device_count()} must be divisible by number of devices used for model parallelism ({self.mp_devices})."
self.dp_devices = jax.device_count() // self.mp_devices
===========unchanged ref 0===========
at: dataclasses
field(*, default_factory: Callable[[], _T], init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
field(*, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> Any
field(*, default: _T, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
at: os
listdir(path: bytes) -> List[bytes]
listdir(path: int) -> List[str]
listdir(path: Optional[str]=...) -> List[str]
listdir(path: _PathLike[str]) -> List[str]
at: os.path
exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool
at: tools.train.train
storage = None
at: tools.train.train.TrainingArguments
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
===========unchanged ref 1===========
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
===========unchanged ref 2===========
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
graft_type: str = field(
default="rmsprop_normalized",
metadata={
"help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'"
},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
shard_shampoo_across: str = field(
default="dp",
metadata={
"help": "Whether to shard the optimizer across data devices (dp), model devices (mp) or both (2d)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
|
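TrainingArguments.__post_init__ above validates shard_shampoo_across and derives the number of data-parallel groups from the total device count. The same arithmetic as a standalone sketch that needs no accelerator; dp_mp_split is a hypothetical helper and device_count stands in for jax.device_count().

def dp_mp_split(device_count: int, mp_devices: int) -> int:
    assert mp_devices > 0, "Number of devices for model parallelism must be > 0"
    assert device_count % mp_devices == 0, (
        f"Number of available devices ({device_count}) must be divisible by "
        f"number of devices used for model parallelism ({mp_devices})."
    )
    return device_count // mp_devices

print(dp_mp_split(8, 2))  # 8 devices -> 4 data-parallel groups of 2 model-parallel shards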
dalle_mini.data/Dataset.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
01996042a17f31230af032748a7583c5419dabcf
|
Merge pull request #162 from borisdayma/demo-improvements
|
<3>:<add> # set numpy rng
<add> self.np_rng = np.random.default_rng(self.seed_dataset)
|
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
<0> if self.seed_dataset is None:
<1> # create a random seed
<2> self.seed_dataset = random.randint(0, 2**32 - 1)
<3> self.multi_hosts = jax.process_count() > 1
<4> # feed blank captions only in streaming mode for now
<5> # otherwise dataset could be cached with same blanked captions
<6> if self.blank_caption_prob:
<7> assert (
<8> self.streaming is True
<9> ), "blank_caption_prob can only be used in streaming mode"
<10> # define data_files
<11> if self.train_file is not None or self.validation_file is not None:
<12> # accept braceexpand notation
<13> for k in ["train_file", "validation_file"]:
<14> f = getattr(self, k)
<15> if isinstance(f, str):
<16> setattr(self, k, list(braceexpand(f)))
<17> # for list of files, split training data shards by host
<18> if (
<19> isinstance(self.train_file, list)
<20> and self.multi_hosts
<21> and self.shard_by_host
<22> ):
<23> self.train_file = self.train_file[
<24> jax.process_index() :: jax.process_count()
<25> ]
<26> data_files = {
<27> "train": self.train_file,
<28> "validation": self.validation_file,
<29> }
<30> else:
<31> data_files = None
<32>
<33> # load dataset
<34> dataset = load_dataset(
<35> self.dataset_repo_or_path,
<36> data_files=data_files,
<37> streaming=self.streaming,
<38> use_auth_token=self.use_auth_token,
<39> )
<40> if self.do_train:
<41> if "train" not in dataset:
<42> raise ValueError("Training requires a training dataset")
<43> self.train_dataset =</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
if self.max_train_samples is not None:
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
if self.do_eval:
if "validation" not in dataset:
raise ValueError("Evaluating requires a validation dataset")
self.eval_dataset = dataset["validation"]
if self.max_eval_samples is not None:
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
===========unchanged ref 0===========
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
blank_caption_prob: float = 0.0
clip_score_column: str = "clip_score"
min_clip_score: float = None
max_clip_score: float = None
filter_column: str = None
filter_value: str = None
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
at: dalle_mini.data.Dataset.preprocess
self.train_dataset = (
self.train_dataset.map(partial_blank_caption_function)
if self.streaming
else self.train_dataset.map(
partial_blank_caption_function,
num_proc=None
if self.seed_dataset
else self.preprocessing_num_workers,
load_from_cache_file=False,
desc="Blanking some captions",
)
)
self.train_dataset = self.train_dataset.shuffle(
buffer_size=5000, seed=self.seed_dataset
)
===========unchanged ref 1===========
at: datasets.arrow_dataset.Dataset
wrapper(*, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
at: datasets.load
load_dataset(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, split: Optional[Union[str, Split]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[DownloadMode]=None, ignore_verifications: bool=False, keep_in_memory: Optional[bool]=None, save_infos: bool=False, revision: Optional[Union[str, Version]]=None, use_auth_token: Optional[Union[bool, str]]=None, task: Optional[Union[str, TaskTemplate]]=None, streaming: bool=False, num_proc: Optional[int]=None, *, num_process: int=1, process_id: int=0, seed: Optional[int]=None, experiment_id: Optional[str]=None, max_concurrent_cache_files: int=10000, timeout: Union[int, float]=100, base_path: Optional[str]=None, info: Optional[DatasetInfo]=None, repo_id: Optional[str]=None, **kwargs) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]
at: numpy.random._generator
default_rng(seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator=...) -> Generator
at: random
randint = _inst.randint
===========changed ref 0===========
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
if self.assert_TPU_available:
assert (
jax.local_device_count() == 8
), "TPUs in use, please check running processes"
if self.output_dir.startswith("gs://"):
assert (
storage is not None
), 'Could not find google.storage. Install with "pip install google-cloud-storage"'
assert self.optim in [
"distributed_shampoo",
"adam",
"adafactor",
], f"Selected optimizer not supported: {self.optim}"
assert self.graft_type in [
"rmsprop_normalized",
"rmsprop",
"adagrad",
"adagrad_normalized",
"sgd",
"sqrt_n",
], f"Selected graft type not supported: {self.graft_type}"
assert self.lr_decay in [
None,
"linear",
"exponential",
], f"Selected learning rate decay not supported: {self.lr_decay}"
if self.per_device_eval_batch_size is None:
self.per_device_eval_batch_size = self.per_device_train_batch_size
if self.log_norm_steps is True:
self.log_norm_steps = self.logging_steps
if (
os.path.exists(self.output_dir)
and os.listdir(self.output_dir)
and self.do_train
and not self.overwrite_output_dir
):
raise ValueError(
f"Output directory ({self.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
+ assert self.shard_shampoo_across in [
+ "dp",
+ "mp",
+ "2d",
+ ], f"Shard shampoo across {self.shard_shampoo_across}</s>
===========changed ref 1===========
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
# offset: 1
<s> <add> "2d",
+ ], f"Shard shampoo across {self.shard_shampoo_across} not supported."
assert (
self.mp_devices > 0
), f"Number of devices for model parallelism must be > 0"
assert (
jax.device_count() % self.mp_devices == 0
), f"Number of available devices ({jax.device_count()} must be divisible by number of devices used for model parallelism ({self.mp_devices})."
self.dp_devices = jax.device_count() // self.mp_devices
|
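Dataset.__post_init__ above accepts braceexpand notation for file lists and, on multi-host setups with shard_by_host, gives each host every process_count-th shard. A small sketch with made-up shard names and fixed stand-ins for jax.process_index() / jax.process_count():

from braceexpand import braceexpand

files = list(braceexpand("data/shard-{00..07}.parquet"))  # 8 hypothetical shards
process_index, process_count = 1, 4
print(files[process_index::process_count])
# -> ['data/shard-01.parquet', 'data/shard-05.parquet']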
dalle_mini.data/Dataset.preprocess
|
Modified
|
borisdayma~dalle-mini
|
01996042a17f31230af032748a7583c5419dabcf
|
Merge pull request #162 from borisdayma/demo-improvements
|
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
<0> # get required config variables
<1> decoder_start_token_id = config.decoder_start_token_id
<2> normalize_text = config.normalize_text
<3> max_length = config.max_text_length
<4>
<5> if self.streaming:
<6> # we need to shuffle early in streaming mode
<7> if hasattr(self, "train_dataset"):
<8> self.train_dataset = self.train_dataset.shuffle(
<9> buffer_size=5000, seed=self.seed_dataset
<10> )
<11> else:
<12> self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
<13>
<14> # filter data
<15> partial_filter_function = partial(
<16> filter_function,
<17> filter_column=self.filter_column,
<18> filter_value=self.filter_value,
<19> clip_score_column=self.clip_score_column,
<20> min_clip_score=self.min_clip_score,
<21> max_clip_score=self.max_clip_score,
<22> )
<23> for ds in ["train_dataset", "eval_dataset"]:
<24> if hasattr(self, ds):
<25> setattr(
<26> self,
<27> ds,
<28> (
<29> getattr(self, ds).filter(partial_filter_function)
<30> if self.streaming
<31> else getattr(self, ds).filter(
<32> partial_filter_function,
<33> num_proc=self.preprocessing_num_workers,
<34> load_from_cache_file=not self.overwrite_cache,
<35> desc="Filtering datasets",
<36> )
<37> ),
<38> )
<39>
<40> # normalize text
<41> if normalize_text:
<42> text_normalizer = TextNormalizer()
<43> partial_normalize_function = partial(
<44> normalize_function,
<45> text_column=self.text_column,
<46> text_normalizer=text_normalizer</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# offset: 1
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(partial_normalize_function)
if self.streaming
else getattr(self, ds).map(
partial_normalize_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Normalizing datasets",
)
),
)
# blank captions
if self.blank_caption_prob:
partial_blank_caption_function = partial(
blank_caption_function,
text_column=self.text_column,
blank_caption_prob=self.blank_caption_prob,
)
if hasattr(self, "train_dataset"):
self.train_dataset = (
self.train_dataset.map(partial_blank_caption_function)
if self.streaming
else self.train_dataset.map(
partial_blank_caption_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=False,
desc="Blanking some captions",
)
)
# preprocess
partial_preprocess_function = partial(
preprocess_function,
tokenizer=tokenizer,
text_column=self.text_column,
encoding_column=self.encoding_column,
max_length=max_length,
decoder_start_token_id=decoder_start_token_id,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=[
self.text_column,</s>
===========below chunk 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# offset: 2
<s>(
partial_preprocess_function,
batched=True,
remove_columns=[
self.text_column,
self.encoding_column,
],
)
if self.streaming
else getattr(self, ds).map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
),
)
===========unchanged ref 0===========
at: dalle_mini.data
blank_caption_function(example, text_column, blank_caption_prob, rng=None)
normalize_function(example, text_column, text_normalizer)
filter_function(example, min_clip_score, max_clip_score, clip_score_column, filter_column, filter_value)
preprocess_function(examples, tokenizer, text_column, encoding_column, max_length, decoder_start_token_id)
at: dalle_mini.data.Dataset
streaming: bool = True
text_column: str = "caption"
preprocessing_num_workers: int = None
overwrite_cache: bool = False
clip_score_column: str = "clip_score"
min_clip_score: float = None
max_clip_score: float = None
filter_column: str = None
filter_value: str = None
at: dalle_mini.data.Dataset.__post_init__
self.seed_dataset = random.randint(0, 2**32 - 1)
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
at: dalle_mini.data.Dataset.dataloader
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
at: datasets.arrow_dataset.Dataset
wrapper(*, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None)
===========unchanged ref 1===========
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
if self.seed_dataset is None:
# create a random seed
self.seed_dataset = random.randint(0, 2**32 - 1)
+ # set numpy rng
+ self.np_rng = np.random.default_rng(self.seed_dataset)
self.multi_hosts = jax.process_count() > 1
# feed blank captions only in streaming mode for now
# otherwise dataset could be cached with same blanked captions
if self.blank_caption_prob:
assert (
self.streaming is True
), "blank_caption_prob can only be used in streaming mode"
# define data_files
if self.train_file is not None or self.validation_file is not None:
# accept braceexpand notation
for k in ["train_file", "validation_file"]:
f = getattr(self, k)
if isinstance(f, str):
setattr(self, k, list(braceexpand(f)))
# for list of files, split training data shards by host
if (
isinstance(self.train_file, list)
and self.multi_hosts
and self.shard_by_host
):
self.train_file = self.train_file[
jax.process_index() :: jax.process_count()
]
data_files = {
"train": self.train_file,
"validation": self.validation_file,
}
else:
data_files = None
# load dataset
dataset = load_dataset(
self.dataset_repo_or_path,
data_files=data_files,
streaming=self.streaming,
use_auth_token=self.use_auth_token,
)
if self.do_train:
if "train" not in dataset:
raise ValueError("Training requires a training dataset")
self.train_dataset = dataset["train"]
if self.max_train</s>
===========changed ref 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
<s> raise ValueError("Training requires a training dataset")
self.train_dataset = dataset["train"]
if self.max_train_samples is not None:
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
if self.do_eval:
if "validation" not in dataset:
raise ValueError("Evaluating requires a validation dataset")
self.eval_dataset = dataset["validation"]
if self.max_eval_samples is not None:
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
|
|
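Dataset.preprocess above builds its filters and mappers with functools.partial so the same callable can be handed to both the streaming and the non-streaming Dataset.filter/map. A simplified stand-in for filter_function, applied to plain dicts rather than a datasets.Dataset:

from functools import partial

def keep_example(example, min_clip_score=None, clip_score_column="clip_score"):
    # drop an example only when a threshold is set and its CLIP score falls below it
    if min_clip_score is not None and example[clip_score_column] < min_clip_score:
        return False
    return True

keep = partial(keep_example, min_clip_score=0.3)
rows = [{"caption": "a cat", "clip_score": 0.42}, {"caption": "noise", "clip_score": 0.10}]
print([r["caption"] for r in rows if keep(r)])  # -> ['a cat']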
dalle_mini.data/blank_caption_function
|
Modified
|
borisdayma~dalle-mini
|
01996042a17f31230af032748a7583c5419dabcf
|
Merge pull request #162 from borisdayma/demo-improvements
|
<0>:<add> if (
<add> blank_caption_prob
<add> and (rng.random() if rng is not None else np.random.random())
<add> < blank_caption_prob
<add> ):
<del> if blank_caption_prob and np.random.rand() < blank_caption_prob:
|
# module: dalle_mini.data
+ def blank_caption_function(example, text_column, blank_caption_prob, rng=None):
- def blank_caption_function(example, text_column, blank_caption_prob):
<0> if blank_caption_prob and np.random.rand() < blank_caption_prob:
<1> example[text_column] = ""
<2> return example
<3>
|
===========unchanged ref 0===========
at: dalle_mini.data.shift_tokens_right
shifted_input_ids = np.zeros(input_ids.shape)
shifted_input_ids[:, 0] = decoder_start_token_id
shifted_input_ids[:, 1:] = input_ids[:, :-1]
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
if self.seed_dataset is None:
# create a random seed
self.seed_dataset = random.randint(0, 2**32 - 1)
+ # set numpy rng
+ self.np_rng = np.random.default_rng(self.seed_dataset)
self.multi_hosts = jax.process_count() > 1
# feed blank captions only in streaming mode for now
# otherwise dataset could be cached with same blanked captions
if self.blank_caption_prob:
assert (
self.streaming is True
), "blank_caption_prob can only be used in streaming mode"
# define data_files
if self.train_file is not None or self.validation_file is not None:
# accept braceexpand notation
for k in ["train_file", "validation_file"]:
f = getattr(self, k)
if isinstance(f, str):
setattr(self, k, list(braceexpand(f)))
# for list of files, split training data shards by host
if (
isinstance(self.train_file, list)
and self.multi_hosts
and self.shard_by_host
):
self.train_file = self.train_file[
jax.process_index() :: jax.process_count()
]
data_files = {
"train": self.train_file,
"validation": self.validation_file,
}
else:
data_files = None
# load dataset
dataset = load_dataset(
self.dataset_repo_or_path,
data_files=data_files,
streaming=self.streaming,
use_auth_token=self.use_auth_token,
)
if self.do_train:
if "train" not in dataset:
raise ValueError("Training requires a training dataset")
self.train_dataset = dataset["train"]
if self.max_train</s>
===========changed ref 1===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
<s> raise ValueError("Training requires a training dataset")
self.train_dataset = dataset["train"]
if self.max_train_samples is not None:
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
if self.do_eval:
if "validation" not in dataset:
raise ValueError("Evaluating requires a validation dataset")
self.eval_dataset = dataset["validation"]
if self.max_eval_samples is not None:
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
===========changed ref 2===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# get required config variables
decoder_start_token_id = config.decoder_start_token_id
normalize_text = config.normalize_text
max_length = config.max_text_length
if self.streaming:
# we need to shuffle early in streaming mode
if hasattr(self, "train_dataset"):
self.train_dataset = self.train_dataset.shuffle(
buffer_size=5000, seed=self.seed_dataset
)
else:
self.rng_dataset = jax.random.PRNGKey(self.seed_dataset)
# filter data
partial_filter_function = partial(
filter_function,
filter_column=self.filter_column,
filter_value=self.filter_value,
clip_score_column=self.clip_score_column,
min_clip_score=self.min_clip_score,
max_clip_score=self.max_clip_score,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).filter(partial_filter_function)
if self.streaming
else getattr(self, ds).filter(
partial_filter_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Filtering datasets",
)
),
)
# normalize text
if normalize_text:
text_normalizer = TextNormalizer()
partial_normalize_function = partial(
normalize_function,
text_column=self.text_column,
text_normalizer=text_normalizer,
)
for ds in ["train_dataset", "eval_dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr</s>
===========changed ref 3===========
# module: dalle_mini.data
@dataclass
class Dataset:
def preprocess(self, tokenizer, config):
# offset: 1
<s>dataset"]:
if hasattr(self, ds):
setattr(
self,
ds,
(
getattr(self, ds).map(partial_normalize_function)
if self.streaming
else getattr(self, ds).map(
partial_normalize_function,
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Normalizing datasets",
)
),
)
# blank captions
if self.blank_caption_prob:
partial_blank_caption_function = partial(
blank_caption_function,
text_column=self.text_column,
blank_caption_prob=self.blank_caption_prob,
+ rng=self.np_rng,
)
if hasattr(self, "train_dataset"):
self.train_dataset = (
self.train_dataset.map(partial_blank_caption_function)
if self.streaming
else self.train_dataset.map(
partial_blank_caption_function,
+ num_proc=None
+ if self.seed_dataset
+ else self.preprocessing_num_workers,
- num_proc=self.preprocessing_num_workers,
load_from_cache_file=False,
desc="Blanking some captions",
)
)
# preprocess
partial_preprocess_function = partial(
preprocess_function,
tokenizer=tokenizer,
text_column=self.text_column,
encoding_column=self.encoding_column,
max_length=max_length,
decoder_start_token_id=decoder_start_token_id,
)
for ds in ["train_dataset", "eval_dataset"]:
</s>
|
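The blank_caption_function change above threads a seeded numpy Generator through instead of relying on the global np.random state, so the same captions are blanked on every run for a given seed_dataset. The property it relies on, with a hypothetical blank_caption_prob of 0.15:

import numpy as np

rng_a = np.random.default_rng(42)
rng_b = np.random.default_rng(42)
print([rng_a.random() < 0.15 for _ in range(8)])
print([rng_b.random() < 0.15 for _ in range(8)])  # identical mask -> identical captions blanked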
dalle_mini.model.partitions/set_partitions
|
Modified
|
borisdayma~dalle-mini
|
07a6f9a7ae599755f4d97d9ff0932b15f6732a57
|
feat: scan layers + gradient checkpointing (#161)
|
<7>:<add> l = list(result.keys())
<add> if use_scan:
<add> # add None dimension to scanned layers
<add> result = {
<add> k: (P(*(None,) + v) if v is not None else None)
<add> if any(x in k for x in ["FlaxBartEncoderLayers", "FlaxBartDecoderLayers"])
<add> else v
<add> for k, v in result.items()
<add> }
|
# module: dalle_mini.model.partitions
+ def set_partitions(in_dict, use_scan):
- def set_partitions(in_dict):
<0> rules = _get_partition_rules()
<1> replace = _replacement_rules(rules)
<2> initd = {k: _unmatched for k in flatten_dict(in_dict)}
<3> result = {k: replace(k, v) for k, v in initd.items()}
<4> for k, v in result.items():
<5> if v == _unmatched:
<6> print(f"Unmatched -> {k}")
<7> assert _unmatched not in result.values(), "Incomplete partition spec."
<8> return freeze(unflatten_dict(result))
<9>
|
===========unchanged ref 0===========
at: dalle_mini.model.partitions
_unmatched = object()
_replacement_rules(rules)
_get_partition_rules()
===========changed ref 0===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @classmethod
+ def create(cls, *, apply_fn, params, tx, **kwargs):
+ opt_state = tx.init(cls.unscan(params))
+ return cls(
+ step=0,
+ apply_fn=apply_fn,
+ params=params,
+ tx=tx,
+ opt_state=opt_state,
+ **kwargs,
+ )
+
===========changed ref 1===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @staticmethod
+ def unscan(params):
+ params = unfreeze(params)
+ for l in ["encoder", "decoder"]:
+ params["model"][l]["layers"] = jax.tree_map(
+ lambda x: {f"{i}": x[i] for i in range(len(x))},
+ params["model"][l]["layers"],
+ )
+ params = freeze(params)
+ return params
+
===========changed ref 2===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @staticmethod
+ def rescan(params):
+ params = unfreeze(params)
+ for l in ["encoder", "decoder"]:
+ params["model"][l]["layers"] = jax.tree_map(
+ lambda x: jnp.stack([x[f"{i}"] for i in range(len(x))]),
+ params["model"][l]["layers"],
+ is_leaf=lambda x: "0" in x,
+ )
+ params = freeze(params)
+ return params
+
===========changed ref 3===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ def apply_gradients(self, *, grads, **kwargs):
+ params = self.unscan(self.params)
+ updates, new_opt_state = self.tx.update(
+ self.unscan(grads), self.opt_state, params
+ )
+ params = optax.apply_updates(params, updates)
+ return self.replace(
+ step=self.step + 1,
+ params=self.rescan(params),
+ opt_state=new_opt_state,
+ **kwargs,
+ )
+
===========changed ref 4===========
# module: tools.train.train
def main():
# See all possible arguments by passing the --help flag to this script.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# check arguments
if training_args.mp_devices > jax.local_device_count():
assert (
data_args.seed_dataset is not None
), "Seed dataset must be provided when model is split over multiple hosts"
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args</s>
===========changed ref 5===========
# module: tools.train.train
def main():
# offset: 1
<s>
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args.do_train,
do_eval=training_args.do_eval,
)
logger.info(f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax.device_count()}")
# Set up wandb run
if jax.process_index() == 0:
wandb.init(
entity=training_args.wandb_entity,
project=training_args.wandb_project,
job_type=training_args.wandb_job_type,
config=parser.parse_args(),
)
# Set up our new model config
if model_args.config_name:
config = DalleBartConfig.from_pretrained(model_args.config_name)
config.gradient_checkpointing = training_args.gradient_checkpointing
else:
config = None
# Load or create new model
if model_args.model_name_or_path:
model = DalleBart.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
abstract_init=True, # we overwrite them with loaded checkpoint
gradient_checkpointing=training_args.gradient_checkpointing,
)
else:
model = DalleBart(
config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
abstract_init=True,
)
# get model metadata
model_metadata = model_args.get_metadata()
# get PartitionSpec for</s>
|
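set_partitions above prepends a None axis to the spec of every scanned layer because nn.scan stores the per-layer weights stacked along a new leading dimension, and that layer axis must stay unpartitioned. The shape change, sketched with jnp.stack and a plain tuple standing in for PartitionSpec:

import jax.numpy as jnp

kernels = [jnp.ones((4, 8)) for _ in range(3)]  # three layers with (4, 8) kernels
stacked = jnp.stack(kernels)                    # a scanned module stores them as (3, 4, 8)
per_layer_spec = ("embed", "mp")                # e.g. P("embed", "mp") for a single kernel
scanned_spec = (None,) + per_layer_spec         # leading layer axis left unpartitioned
print(stacked.shape, scanned_spec)              # (3, 4, 8) (None, 'embed', 'mp')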
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
07a6f9a7ae599755f4d97d9ff0932b15f6732a57
|
feat: scan layers + gradient checkpointing (#161)
|
<s> implemented yet - from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
<0> # text normalizer
<1> self.normalize_text = normalize_text
<2>
<3> # transformer variants
<4> self.use_bias = use_bias
<5> assert ln_type in [
<6> "rmsnorm",
<7> "layernorm",
<8> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<9> self.ln_type = ln_type
<10> if ln_positions == "deepnet":
<11> ln_positions = "postln"
<12> assert ln_positions in [
<13> "normformer",
<14> "swinv2",
<15> "cogview",
<16> "postln",
<17> "preln",
<18> ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
<19> self.use_head_scale = use_head_scale
<20> assert use_alibi is False, "use_alibi is not supported yet"
<21> self.ln_positions = ln_positions
<22> self.use_cosine_attention = use_cosine_attention
<23> self.tau_init = tau_init
<24> self.use_absolute_position_embeddings = use_absolute_position_embeddings
<25> self.use_swin_position_embeddings = use_swin_position_embeddings
<26> self.use_deepnet_scaling = use_deepnet_scaling
<27> self.use_glu = use_glu
<28> self.use_alibi = use_alibi
<29> self.sinkhorn_iters = sinkhorn</s>
|
===========below chunk 0===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 1
if ln_positions == "postln":
assert (
use_final_ln_encoder
), "use_final_ln_encoder must be True when ln_positions is 'postln'"
assert (
use_final_ln_decoder
), "use_final_ln_decoder must be True when ln_positions is 'postln'"
self.use_final_ln_encoder = use_final_ln_encoder
self.use_final_ln_decoder = use_final_ln_decoder
self.force_ln_scale = force_ln_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale</s>
===========below chunk 1===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 2
<s>cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos</s>
===========below chunk 2===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 3
<s>_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: dalle_mini.model.configuration.DalleBartConfig.__init__
self.forced_bos_token_id = self.bos_token_id
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
===========changed ref 0===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @classmethod
+ def create(cls, *, apply_fn, params, tx, **kwargs):
+ opt_state = tx.init(cls.unscan(params))
+ return cls(
+ step=0,
+ apply_fn=apply_fn,
+ params=params,
+ tx=tx,
+ opt_state=opt_state,
+ **kwargs,
+ )
+
===========changed ref 1===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @staticmethod
+ def unscan(params):
+ params = unfreeze(params)
+ for l in ["encoder", "decoder"]:
+ params["model"][l]["layers"] = jax.tree_map(
+ lambda x: {f"{i}": x[i] for i in range(len(x))},
+ params["model"][l]["layers"],
+ )
+ params = freeze(params)
+ return params
+
===========changed ref 2===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @staticmethod
+ def rescan(params):
+ params = unfreeze(params)
+ for l in ["encoder", "decoder"]:
+ params["model"][l]["layers"] = jax.tree_map(
+ lambda x: jnp.stack([x[f"{i}"] for i in range(len(x))]),
+ params["model"][l]["layers"],
+ is_leaf=lambda x: "0" in x,
+ )
+ params = freeze(params)
+ return params
+
|
|
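The DalleBartConfig record above appends the special token ids right after the image codebook and generates to image_length + 1 so there is room for BOS. With codebook and sequence sizes used here purely as example numbers:

image_vocab_size, image_length = 16384, 256
bos_token_id = pad_token_id = eos_token_id = image_vocab_size  # first id past image codes 0..16383
max_length = image_length + 1                                  # BOS + 256 image tokens
print(bos_token_id, max_length)                                # 16384 257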
dalle_mini.model.modeling/FlaxBartEncoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
07a6f9a7ae599755f4d97d9ff0932b15f6732a57
|
feat: scan layers + gradient checkpointing (#161)
|
<0>:<add> if self.config.use_scan:
<add> hidden_states = hidden_states[0]
<add>
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["encoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<9> hidden_states = norm(
<10> self.config.ln_type,
<11> dtype=self.dtype,
<12> epsilon=1e-05,
<13> use_scale=self.config.force_ln_scale,
<14> )(hidden_states)
<15> hidden_states, attn_weights = FlaxBartAttention(
<16> config=self.config,
<17> embed_dim=embed_dim,
<18> num_heads=self.config.encoder_attention_heads,
<19> dropout=self.config.attention_dropout,
<20> bias=self.config.use_bias,
<21> dtype=self.dtype,
<22> is_encoder=True,
<23> q_length=self.config.max_text_length,
<24> k_length=self.config.max_text_length,
<25> )(hidden_states=hidden_states, attention_mask=attention_mask)
<26>
<27> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<28> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<29> hidden_states
<30> )
<31> hidden_states = nn.Dropout(rate=self.config.dropout)(
<32> hidden_states,</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["postln"]:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.force_ln_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s>hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartEncoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = True
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @classmethod
+ def create(cls, *, apply_fn, params, tx, **kwargs):
+ opt_state = tx.init(cls.unscan(params))
+ return cls(
+ step=0,
+ apply_fn=apply_fn,
+ params=params,
+ tx=tx,
+ opt_state=opt_state,
+ **kwargs,
+ )
+
===========changed ref 1===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @staticmethod
+ def unscan(params):
+ params = unfreeze(params)
+ for l in ["encoder", "decoder"]:
+ params["model"][l]["layers"] = jax.tree_map(
+ lambda x: {f"{i}": x[i] for i in range(len(x))},
+ params["model"][l]["layers"],
+ )
+ params = freeze(params)
+ return params
+
===========changed ref 2===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @staticmethod
+ def rescan(params):
+ params = unfreeze(params)
+ for l in ["encoder", "decoder"]:
+ params["model"][l]["layers"] = jax.tree_map(
+ lambda x: jnp.stack([x[f"{i}"] for i in range(len(x))]),
+ params["model"][l]["layers"],
+ is_leaf=lambda x: "0" in x,
+ )
+ params = freeze(params)
+ return params
+
===========changed ref 3===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ def apply_gradients(self, *, grads, **kwargs):
+ params = self.unscan(self.params)
+ updates, new_opt_state = self.tx.update(
+ self.unscan(grads), self.opt_state, params
+ )
+ params = optax.apply_updates(params, updates)
+ return self.replace(
+ step=self.step + 1,
+ params=self.rescan(params),
+ opt_state=new_opt_state,
+ **kwargs,
+ )
+
===========changed ref 4===========
# module: dalle_mini.model.partitions
+ def set_partitions(in_dict, use_scan):
- def set_partitions(in_dict):
rules = _get_partition_rules()
replace = _replacement_rules(rules)
initd = {k: _unmatched for k in flatten_dict(in_dict)}
result = {k: replace(k, v) for k, v in initd.items()}
for k, v in result.items():
if v == _unmatched:
print(f"Unmatched -> {k}")
+ l = list(result.keys())
+ if use_scan:
+ # add None dimension to scanned layers
+ result = {
+ k: (P(*(None,) + v) if v is not None else None)
+ if any(x in k for x in ["FlaxBartEncoderLayers", "FlaxBartDecoderLayers"])
+ else v
+ for k, v in result.items()
+ }
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(result))
|
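The encoder and decoder layer records above scale the residual branch by a DeepNet gain before the skip connection (residual * res_gain + hidden_states). Evaluating the deepnet_gain formulas quoted in the unchanged refs for a hypothetical 12-layer encoder / 12-layer decoder:

encoder_layers, decoder_layers = 12, 12          # example depths, not a claim about the trained model
enc_alpha = 0.81 * (encoder_layers**4 * decoder_layers) ** 0.0625
dec_alpha = (3 * decoder_layers) ** 0.25
print(round(enc_alpha, 3), round(dec_alpha, 3))  # roughly 1.761 and 2.449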
dalle_mini.model.modeling/FlaxBartDecoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
07a6f9a7ae599755f4d97d9ff0932b15f6732a57
|
feat: scan layers + gradient checkpointing (#161)
|
<0>:<add> if self.config.use_scan:
<add> hidden_states = hidden_states[0]
<add>
|
<s> FlaxBartDecoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> res_gain = (
<1> deepnet_gain["decoder"]["alpha"](self.config)
<2> if self.config.use_deepnet_scaling
<3> else 1
<4> )
<5>
<6> embed_dim = self.config.d_model
<7> residual = hidden_states
<8>
<9> # Self Attention
<10> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<11> hidden_states = norm(
<12> self.config.ln_type,
<13> dtype=self.dtype,
<14> epsilon=1e-05,
<15> use_scale=self.config.force_ln_scale,
<16> )(hidden_states)
<17> hidden_states, attn_weights = FlaxBartAttention(
<18> config=self.config,
<19> embed_dim=embed_dim,
<20> num_heads=self.config.decoder_attention_heads,
<21> dropout=self.config.attention_dropout,
<22> causal=True,
<23> bias=self.config.use_bias,
<24> dtype=self.dtype,
<25> is_encoder=False,
<26> q_length=self.config.image_length,
<27> k_length=self.config.image_length,
<28> )(
<29> hidden_states=hidden_states,
<30> attention_mask=attention_mask,
<31> init_cache=init_cache,
<32> )
<33>
<34> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:</s>
|
===========below chunk 0===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states
)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
# Cross Attention
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
hidden_states, cross_attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
bias=self.config.use_bias,
dtype=self.dtype,
is_encoder=False,
q_length=self.config.image_length,
k_length=self.config.max_text_length,
)(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention</s>
===========below chunk 1===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> )(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
# Feed forward
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
</s>
===========below chunk 2===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>_encoder=False,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm or self.config.ln_positions in ["postln"]:
use_scale = (
self.use_scale
or self.config.ln_positions == "postln"
or self.config.force_ln_scale
)
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights, cross_attn_weights)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartDecoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = False
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartDecoderLayer(nn.Module):
"""
Edits:
- no bias
- use custom FlaxBartAttention
"""
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
+ use_scale: bool = True
- use_scale: bool = False
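For reference, a minimal self-contained sketch (not the repository code) of the pre-LN residual pattern with a scaled residual connection that the decoder layer above follows; the dense layer stands in for attention/GLU and the gain value is an illustrative assumption.

import flax.linen as nn
import jax
import jax.numpy as jnp

class PreLNResidualBlock(nn.Module):
    res_gain: float = 1.0  # assumption: stands in for the DeepNet "alpha" gain

    @nn.compact
    def __call__(self, x):
        residual = x
        h = nn.LayerNorm(epsilon=1e-05, use_scale=False)(x)  # "preln" position
        h = nn.Dense(x.shape[-1], use_bias=False)(h)         # stand-in for attention / GLU
        return residual * self.res_gain + h                  # scaled residual connection

x = jnp.ones((2, 4, 8))
block = PreLNResidualBlock()
y = block.apply(block.init(jax.random.PRNGKey(0), x), x)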
|
dalle_mini.model.modeling/FlaxBartEncoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
07a6f9a7ae599755f4d97d9ff0932b15f6732a57
|
feat: scan layers + gradient checkpointing (#161)
|
<5>:<add> remat(
<add> FlaxBartEncoderLayer,
<add> static_argnums=(2, 3),
<add> prevent_cse=not self.config.use_scan,
<add> )
<del> remat(FlaxBartEncoderLayer, static_argnums=(2, 3))
<9>:<add>
<add> if self.config.use_scan:
<add> # all blocks are the same so we use nn.scan
<add> assert not output_attentions, "cannot scan with output_attentions"
<add> assert not output_hidden_states, "cannot scan with output_hidden_states"
<del> for i in range(n_layers):
<10>:<del> if output_hidden_states:
<11>:<add> hidden_states = (hidden_states,)
<del> all_hidden_states += (hidden_states,)
<12>:<add> # we use a scale on all norms (even last layer) to allow scanning
<add> hidden_states, _ = nn.scan(
<add> layer,
<add> variable_axes={"params": 0},
<add> split_rngs={"params": True, "dropout": True},
<add> in_axes=(nn.broadcast, nn.broadcast, nn.broadcast),
<add> length=n_layers,
<add> )(
<add> self.config
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> all_hidden_states = () if output_hidden_states else None
<1> all_self_attns = () if output_attentions else None
<2>
<3> n_layers = self.config.encoder_layers
<4> layer = (
<5> remat(FlaxBartEncoderLayer, static_argnums=(2, 3))
<6> if self.config.gradient_checkpointing
<7> else FlaxBartEncoderLayer
<8> )
<9> for i in range(n_layers):
<10> if output_hidden_states:
<11> all_hidden_states += (hidden_states,)
<12> # final layernorm on the output of the last layer
<13> # or every 6 layers for Swin v2
<14> add_norm = (
<15> self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0)
<16> ) or (self.config.use_final_ln_encoder and (i == n_layers - 1))
<17> # we don't need to scale the norm for the last layer
<18> use_scale = i != n_layers - 1
<19> layer_outputs = layer(
<20> self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale
<21> )(
<22> hidden_states,
<23> attention_mask,
<24> output_attentions,
<25> deterministic,
<26> )
<27> hidden_states = layer_outputs[0]
<28> if output_attentions:
<29> all_self_attns += (layer_outputs[1],)
<30>
<31> # add hidden states from the last layer
<32> if output_hidden_states:
<33> all_hidden_states += (</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartEncoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutput(**kwargs: _VT)
FlaxBaseModelOutput(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
FlaxBaseModelOutput(map: Mapping[_KT, _VT], **kwargs: _VT)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartDecoderLayer(nn.Module):
"""
Edits:
- no bias
- use custom FlaxBartAttention
"""
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
+ use_scale: bool = True
- use_scale: bool = False
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
+ if self.config.use_scan:
+ hidden_states = hidden_states[0]
+
res_gain = (
deepnet_gain["encoder"]["alpha"](self.config)
if self.config.use_deepnet_scaling
else 1
)
embed_dim = self.config.d_model
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
hidden_states, attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.encoder_attention_heads,
dropout=self.config.attention_dropout,
bias=self.config.use_bias,
dtype=self.dtype,
is_encoder=True,
q_length=self.config.max_text_length,
k_length=self.config.max_text_length,
)(hidden_states=hidden_states, attention_mask=attention_mask)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
</s>
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
<s> hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
- if self.add_norm or self.config.ln_positions in ["postln"]:
- use_scale = (
- self.use_scale
- or self.config.ln_positions == "postln"
+ if self.add_norm:
+ use_scale = self.use_scale or self.config.force_ln_scale
- or self</s>
===========changed ref 3===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s>.force_ln_scale
- )
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
+ if self.config.use_scan:
+ outputs = (outputs, None)
+
return outputs
===========changed ref 4===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @classmethod
+ def create(cls, *, apply_fn, params, tx, **kwargs):
+ opt_state = tx.init(cls.unscan(params))
+ return cls(
+ step=0,
+ apply_fn=apply_fn,
+ params=params,
+ tx=tx,
+ opt_state=opt_state,
+ **kwargs,
+ )
+
===========changed ref 5===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @staticmethod
+ def unscan(params):
+ params = unfreeze(params)
+ for l in ["encoder", "decoder"]:
+ params["model"][l]["layers"] = jax.tree_map(
+ lambda x: {f"{i}": x[i] for i in range(len(x))},
+ params["model"][l]["layers"],
+ )
+ params = freeze(params)
+ return params
+
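For context, a self-contained sketch of the nn.scan pattern used above to stack identical layers; the block below is a toy residual MLP rather than the repository's encoder layer.

import flax.linen as nn
import jax
import jax.numpy as jnp

class ResidualBlock(nn.Module):
    @nn.compact
    def __call__(self, x, _):
        h = nn.Dense(x.shape[-1], use_bias=False)(nn.LayerNorm()(x))
        return x + h, None  # (carry, per-layer output)

class ScannedStack(nn.Module):
    n_layers: int = 4

    @nn.compact
    def __call__(self, x):
        stack = nn.scan(
            ResidualBlock,
            variable_axes={"params": 0},  # layer parameters get stacked on a leading axis
            split_rngs={"params": True},  # each layer receives its own init rng
            length=self.n_layers,
        )
        x, _ = stack(name="Layers")(x, None)
        return x

x = jnp.ones((2, 8))
variables = ScannedStack().init(jax.random.PRNGKey(0), x)
# every leaf under variables["params"]["Layers"] now has a leading axis of size n_layers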
|
dalle_mini.model.modeling/FlaxBartDecoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
07a6f9a7ae599755f4d97d9ff0932b15f6732a57
|
feat: scan layers + gradient checkpointing (#161)
|
<9>:<add> remat(
<add> FlaxBartDecoderLayer,
<add> static_argnums=(4, 5, 6),
<add> prevent_cse=not self.config.use_scan,
<add> )
<del> remat(FlaxBartDecoderLayer, static_argnums=(4, 5, 6))
<13>:<add>
<add> if self.config.use_scan:
<add> # all blocks are the same so we use nn.scan
<add> assert not output_attentions, "cannot scan with output_attentions"
<add> assert not output_hidden_states, "cannot scan with output_hidden_states"
<del> for i in range(n_layers):
<14>:<del> if output_hidden_states:
<15>:<add> hidden_states = (hidden_states,)
<del> all_hidden_states += (hidden_states,)
<16>:<add> # we use a scale on all norms (even last layer) to allow scanning
<add> hidden_states, _ = nn.scan(
<add> layer,
<add> variable_axes={"params": 0},
<add> split_rngs={"params": True, "dropout": True},
<add> in_axes=(
<add> nn.broadcast,
<add> nn.broadcast,
<add> nn.broadcast,
<add> nn.
|
<s>Collection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> # decoder layers
<1> all_hidden_states = () if output_hidden_states else None
<2> all_self_attns = () if output_attentions else None
<3> all_cross_attentions = (
<4> () if (output_attentions and encoder_hidden_states is not None) else None
<5> )
<6>
<7> n_layers = self.config.decoder_layers
<8> layer = (
<9> remat(FlaxBartDecoderLayer, static_argnums=(4, 5, 6))
<10> if self.config.gradient_checkpointing
<11> else FlaxBartDecoderLayer
<12> )
<13> for i in range(n_layers):
<14> if output_hidden_states:
<15> all_hidden_states += (hidden_states,)
<16> # final layernorm on the output of the last layer
<17> # or every 6 layers for Swin v2
<18> add_norm = (
<19> self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0)
<20> ) or (self.config.use_final_ln_decoder and (i == n_layers - 1))
<21> # we don't need to scale the norm for the last layer
<22> use_scale = i != n_layers - 1
<23> layer_outputs = layer(
<24> self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale
<25> )(
<26> hidden_states,
<27> attention_mask,
<28> encoder_hidden_states,
<29> encoder_attention_mask,
</s>
|
===========below chunk 0===========
<s>):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
all_cross_attentions,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartDecoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutputWithPastAndCrossAttentions(**kwargs: _VT)
FlaxBaseModelOutputWithPastAndCrossAttentions(map: Mapping[_KT, _VT], **kwargs: _VT)
FlaxBaseModelOutputWithPastAndCrossAttentions(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartDecoderLayer(nn.Module):
"""
Edits:
- no bias
- use custom FlaxBartAttention
"""
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
+ use_scale: bool = True
- use_scale: bool = False
===========changed ref 1===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @classmethod
+ def create(cls, *, apply_fn, params, tx, **kwargs):
+ opt_state = tx.init(cls.unscan(params))
+ return cls(
+ step=0,
+ apply_fn=apply_fn,
+ params=params,
+ tx=tx,
+ opt_state=opt_state,
+ **kwargs,
+ )
+
===========changed ref 2===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @staticmethod
+ def unscan(params):
+ params = unfreeze(params)
+ for l in ["encoder", "decoder"]:
+ params["model"][l]["layers"] = jax.tree_map(
+ lambda x: {f"{i}": x[i] for i in range(len(x))},
+ params["model"][l]["layers"],
+ )
+ params = freeze(params)
+ return params
+
===========changed ref 3===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ @staticmethod
+ def rescan(params):
+ params = unfreeze(params)
+ for l in ["encoder", "decoder"]:
+ params["model"][l]["layers"] = jax.tree_map(
+ lambda x: jnp.stack([x[f"{i}"] for i in range(len(x))]),
+ params["model"][l]["layers"],
+ is_leaf=lambda x: "0" in x,
+ )
+ params = freeze(params)
+ return params
+
===========changed ref 4===========
# module: tools.train.train
class TrainState(train_state.TrainState):
+ def apply_gradients(self, *, grads, **kwargs):
+ params = self.unscan(self.params)
+ updates, new_opt_state = self.tx.update(
+ self.unscan(grads), self.opt_state, params
+ )
+ params = optax.apply_updates(params, updates)
+ return self.replace(
+ step=self.step + 1,
+ params=self.rescan(params),
+ opt_state=new_opt_state,
+ **kwargs,
+ )
+
===========changed ref 5===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
+ if self.config.use_scan:
+ hidden_states = hidden_states[0]
+
res_gain = (
deepnet_gain["encoder"]["alpha"](self.config)
if self.config.use_deepnet_scaling
else 1
)
embed_dim = self.config.d_model
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
hidden_states, attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.encoder_attention_heads,
dropout=self.config.attention_dropout,
bias=self.config.use_bias,
dtype=self.dtype,
is_encoder=True,
q_length=self.config.max_text_length,
k_length=self.config.max_text_length,
)(hidden_states=hidden_states, attention_mask=attention_mask)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
</s>
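As a side note, a minimal sketch of the flax rematerialization (gradient checkpointing) wrapper used above; the wrapped module and the static argument index are illustrative, not the repository's.

import flax.linen as nn
import jax
import jax.numpy as jnp

class TinyFFN(nn.Module):
    @nn.compact
    def __call__(self, x, deterministic: bool = True):
        h = nn.relu(nn.Dense(32)(x))
        h = nn.Dropout(rate=0.1)(h, deterministic=deterministic)
        return nn.Dense(x.shape[-1])(h)

# remat re-computes activations during the backward pass instead of storing them;
# static_argnums indexes the __call__ arguments after `self`, so 1 == `deterministic`.
RematFFN = nn.remat(TinyFFN, static_argnums=(1,))

x = jnp.ones((2, 16))
model = RematFFN()
variables = model.init(jax.random.PRNGKey(0), x, True)
y = model.apply(variables, x, True)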
|
tools.train.train/TrainState.apply_gradients
|
Modified
|
borisdayma~dalle-mini
|
b993d27f4707b7fbff7c09c4ae897067f6751956
|
feat: vmap optimizer (#166)
|
<0>:<add> grads = split_params(grads)
<add> params = split_params(self.params)
<del> params = self.unscan(self.params)
<1>:<add> opt_state = {}
<add> # we loop over keys: "standard", "scanned_encoder", "scanned_decoder"
<add> for k, param in params.items():
<add> update_fn = self.tx[k].update
<add> if "scanned" in k:
<add> update_fn = jax.vmap(update_fn, in_axes=(0, 0, 0), out_axes=(0, 0))
<add> updates, new_opt_state = update_fn(grads[k], self.opt_state[k], param)
<del> updates, new_opt_state = self.tx.update(
<2>:<del> self.unscan(grads), self.opt_state, params
<3>:<del> )
<4>:<add> params[k] = optax.apply_updates(param, updates)
<del> params = optax.apply_updates(params, updates)
<5>:<add> opt_state[k] = new_opt_state
<add> params = unsplit_params(params)
<add>
<7>:<add> params=params,
<del> params=self.rescan(params),
<8>:<add> opt_state=freeze(opt_state),
<del> opt_state=new_opt_state,
|
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
def apply_gradients(self, *, grads, **kwargs):
<0> params = self.unscan(self.params)
<1> updates, new_opt_state = self.tx.update(
<2> self.unscan(grads), self.opt_state, params
<3> )
<4> params = optax.apply_updates(params, updates)
<5> return self.replace(
<6> step=self.step + 1,
<7> params=self.rescan(params),
<8> opt_state=new_opt_state,
<9> **kwargs,
<10> )
<11>
|
===========changed ref 0===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
+ step: int
+ params: core.FrozenDict[str, Any]
+ opt_state: optax.OptState
+ apply_fn: Callable = struct.field(pytree_node=False)
+ tx: optax.GradientTransformation = struct.field(pytree_node=False)
dropout_rng: jnp.ndarray = None
epoch: int = 0
train_time: float = 0.0 # total time the model trained
train_samples: int = 0 # number of samples seen
===========changed ref 1===========
# module: tools.train.train
+ def split_params(data):
+ """Split params between scanned and non-scanned"""
+ flat = traverse_util.flatten_dict(unfreeze(data))
+ split = {"standard": {}, "scanned_encoder": {}, "scanned_decoder": {}}
+ for k, v in flat.items():
+ if "FlaxBartEncoderLayers" in k:
+ split["scanned_encoder"][k] = v
+ elif "FlaxBartDecoderLayers" in k:
+ split["scanned_decoder"][k] = v
+ else:
+ split["standard"][k] = v
+ for k, v in split.items():
+ split[k] = freeze(traverse_util.unflatten_dict(v))
+ return split
+
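For context, a self-contained sketch of vmapping an optax update over the leading layer axis of scanned parameters, in the spirit of apply_gradients above; the optimizer, names and shapes are illustrative.

import jax
import jax.numpy as jnp
import optax

n_layers, dim = 4, 8
stacked_params = {"w": jnp.ones((n_layers, dim, dim))}  # all layers stacked on axis 0
stacked_grads = jax.tree_util.tree_map(jnp.ones_like, stacked_params)

tx = optax.adam(1e-3)
opt_state = jax.vmap(tx.init)(stacked_params)  # one optimizer state per layer
update_fn = jax.vmap(tx.update, in_axes=(0, 0, 0), out_axes=(0, 0))
updates, opt_state = update_fn(stacked_grads, opt_state, stacked_params)
new_params = optax.apply_updates(stacked_params, updates)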
|
tools.train.train/TrainState.create
|
Modified
|
borisdayma~dalle-mini
|
b993d27f4707b7fbff7c09c4ae897067f6751956
|
feat: vmap optimizer (#166)
|
<0>:<add> opt_state = {}
<add> for k, p in split_params(params).items():
<add> init_fn = tx[k].init
<add> if "scanned" in k:
<add> init_fn = jax.vmap(init_fn)
<add> opt_state[k] = init_fn(p)
<del> opt_state = tx.init(cls.unscan(params))
<6>:<add> opt_state=freeze(opt_state),
<del> opt_state=opt_state,
|
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
@classmethod
def create(cls, *, apply_fn, params, tx, **kwargs):
<0> opt_state = tx.init(cls.unscan(params))
<1> return cls(
<2> step=0,
<3> apply_fn=apply_fn,
<4> params=params,
<5> tx=tx,
<6> opt_state=opt_state,
<7> **kwargs,
<8> )
<9>
|
===========unchanged ref 0===========
at: tools.train.train.TrainState
train_time: float = 0.0 # total time the model trained
train_samples: int = 0 # number of samples seen
at: tools.train.train.unsplit_params
flat = {}
at: typing
Callable = _CallableType(collections.abc.Callable, 2)
===========changed ref 0===========
# module: tools.train.train
+ def unsplit_params(data):
+ flat = {}
+ for k in ["standard", "scanned_encoder", "scanned_decoder"]:
+ flat.update(traverse_util.flatten_dict(unfreeze(data[k])))
+ return freeze(traverse_util.unflatten_dict(flat))
+
===========changed ref 1===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
+ step: int
+ params: core.FrozenDict[str, Any]
+ opt_state: optax.OptState
+ apply_fn: Callable = struct.field(pytree_node=False)
+ tx: optax.GradientTransformation = struct.field(pytree_node=False)
dropout_rng: jnp.ndarray = None
epoch: int = 0
train_time: float = 0.0 # total time the model trained
train_samples: int = 0 # number of samples seen
===========changed ref 2===========
# module: tools.train.train
+ def split_params(data):
+ """Split params between scanned and non-scanned"""
+ flat = traverse_util.flatten_dict(unfreeze(data))
+ split = {"standard": {}, "scanned_encoder": {}, "scanned_decoder": {}}
+ for k, v in flat.items():
+ if "FlaxBartEncoderLayers" in k:
+ split["scanned_encoder"][k] = v
+ elif "FlaxBartDecoderLayers" in k:
+ split["scanned_decoder"][k] = v
+ else:
+ split["standard"][k] = v
+ for k, v in split.items():
+ split[k] = freeze(traverse_util.unflatten_dict(v))
+ return split
+
===========changed ref 3===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
def apply_gradients(self, *, grads, **kwargs):
+ grads = split_params(grads)
+ params = split_params(self.params)
- params = self.unscan(self.params)
+ opt_state = {}
+ # we loop over keys: "standard", "scanned_encoder", "scanned_decoder"
+ for k, param in params.items():
+ update_fn = self.tx[k].update
+ if "scanned" in k:
+ update_fn = jax.vmap(update_fn, in_axes=(0, 0, 0), out_axes=(0, 0))
+ updates, new_opt_state = update_fn(grads[k], self.opt_state[k], param)
- updates, new_opt_state = self.tx.update(
- self.unscan(grads), self.opt_state, params
- )
+ params[k] = optax.apply_updates(param, updates)
- params = optax.apply_updates(params, updates)
+ opt_state[k] = new_opt_state
+ params = unsplit_params(params)
+
return self.replace(
step=self.step + 1,
+ params=params,
- params=self.rescan(params),
+ opt_state=freeze(opt_state),
- opt_state=new_opt_state,
**kwargs,
)
|
dalle_mini.model.modeling/FlaxBartEncoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
b993d27f4707b7fbff7c09c4ae897067f6751956
|
feat: vmap optimizer (#166)
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> all_hidden_states = () if output_hidden_states else None
<1> all_self_attns = () if output_attentions else None
<2>
<3> n_layers = self.config.encoder_layers
<4> layer = (
<5> remat(
<6> FlaxBartEncoderLayer,
<7> static_argnums=(2, 3),
<8> prevent_cse=not self.config.use_scan,
<9> )
<10> if self.config.gradient_checkpointing
<11> else FlaxBartEncoderLayer
<12> )
<13>
<14> if self.config.use_scan:
<15> # all blocks are the same so we use nn.scan
<16> assert not output_attentions, "cannot scan with output_attentions"
<17> assert not output_hidden_states, "cannot scan with output_hidden_states"
<18> hidden_states = (hidden_states,)
<19> # we use a scale on all norms (even last layer) to allow scanning
<20> hidden_states, _ = nn.scan(
<21> layer,
<22> variable_axes={"params": 0},
<23> split_rngs={"params": True, "dropout": True},
<24> in_axes=(nn.broadcast, nn.broadcast, nn.broadcast),
<25> length=n_layers,
<26> )(
<27> self.config,
<28> dtype=self.dtype,
<29> add_norm=self.config.ln_positions == "postln",
<30> name="FlaxBartEncoderLayers",
<31> )(
<32> hidden_states,
<33> attention_mask,
<34> output_attentions,
<35> deterministic,
<36> )
<37> hidden_states = hidden_</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
else:
for i in range(n_layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# final layernorm on the output of the last layer
# or every 6 layers for Swin v2
add_norm = self.config.ln_positions == "postln" or (
self.config.ln_positions == "swinv2"
and ((i + 1) % 6 == 0)
and (i != n_layers - 1)
)
# we don't need to scale the norm for the last layer
use_scale = i != n_layers - 1
layer_outputs = layer(
self.config,
dtype=self.dtype,
add_norm=add_norm,
use_scale=use_scale,
name=f"FlaxBartEncoderLayer_{i}",
)(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
# add hidden states from the last layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
# postln is already applied in every layer
if self.config.use_final_ln_encoder and self.config.ln_positions != "postln":
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 2
<s>.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartEncoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutput(**kwargs: _VT)
FlaxBaseModelOutput(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
FlaxBaseModelOutput(map: Mapping[_KT, _VT], **kwargs: _VT)
===========changed ref 0===========
# module: tools.train.train
+ def unsplit_params(data):
+ flat = {}
+ for k in ["standard", "scanned_encoder", "scanned_decoder"]:
+ flat.update(traverse_util.flatten_dict(unfreeze(data[k])))
+ return freeze(traverse_util.unflatten_dict(flat))
+
===========changed ref 1===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
- @staticmethod
- def unscan(params):
- params = unfreeze(params)
- for l in ["encoder", "decoder"]:
- params["model"][l]["layers"] = jax.tree_map(
- lambda x: {f"{i}": x[i] for i in range(len(x))},
- params["model"][l]["layers"],
- )
- params = freeze(params)
- return params
-
===========changed ref 2===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
- @staticmethod
- def rescan(params):
- params = unfreeze(params)
- for l in ["encoder", "decoder"]:
- params["model"][l]["layers"] = jax.tree_map(
- lambda x: jnp.stack([x[f"{i}"] for i in range(len(x))]),
- params["model"][l]["layers"],
- is_leaf=lambda x: "0" in x,
- )
- params = freeze(params)
- return params
-
===========changed ref 3===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
+ step: int
+ params: core.FrozenDict[str, Any]
+ opt_state: optax.OptState
+ apply_fn: Callable = struct.field(pytree_node=False)
+ tx: optax.GradientTransformation = struct.field(pytree_node=False)
dropout_rng: jnp.ndarray = None
epoch: int = 0
train_time: float = 0.0 # total time the model trained
train_samples: int = 0 # number of samples seen
===========changed ref 4===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
@classmethod
def create(cls, *, apply_fn, params, tx, **kwargs):
+ opt_state = {}
+ for k, p in split_params(params).items():
+ init_fn = tx[k].init
+ if "scanned" in k:
+ init_fn = jax.vmap(init_fn)
+ opt_state[k] = init_fn(p)
- opt_state = tx.init(cls.unscan(params))
return cls(
step=0,
apply_fn=apply_fn,
params=params,
tx=tx,
+ opt_state=freeze(opt_state),
- opt_state=opt_state,
**kwargs,
)
===========changed ref 5===========
# module: tools.train.train
+ def split_params(data):
+ """Split params between scanned and non-scanned"""
+ flat = traverse_util.flatten_dict(unfreeze(data))
+ split = {"standard": {}, "scanned_encoder": {}, "scanned_decoder": {}}
+ for k, v in flat.items():
+ if "FlaxBartEncoderLayers" in k:
+ split["scanned_encoder"][k] = v
+ elif "FlaxBartDecoderLayers" in k:
+ split["scanned_decoder"][k] = v
+ else:
+ split["standard"][k] = v
+ for k, v in split.items():
+ split[k] = freeze(traverse_util.unflatten_dict(v))
+ return split
+
|
|
dalle_mini.model.modeling/FlaxBartDecoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
b993d27f4707b7fbff7c09c4ae897067f6751956
|
feat: vmap optimizer (#166)
|
<s>Collection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> # decoder layers
<1> all_hidden_states = () if output_hidden_states else None
<2> all_self_attns = () if output_attentions else None
<3> all_cross_attentions = (
<4> () if (output_attentions and encoder_hidden_states is not None) else None
<5> )
<6>
<7> n_layers = self.config.decoder_layers
<8> layer = (
<9> remat(
<10> FlaxBartDecoderLayer,
<11> static_argnums=(4, 5, 6),
<12> prevent_cse=not self.config.use_scan,
<13> )
<14> if self.config.gradient_checkpointing
<15> else FlaxBartDecoderLayer
<16> )
<17>
<18> if self.config.use_scan:
<19> # all blocks are the same so we use nn.scan
<20> assert not output_attentions, "cannot scan with output_attentions"
<21> assert not output_hidden_states, "cannot scan with output_hidden_states"
<22> hidden_states = (hidden_states,)
<23> # we use a scale on all norms (even last layer) to allow scanning
<24> hidden_states, _ = nn.scan(
<25> layer,
<26> variable_axes={"params": 0},
<27> split_rngs={"params": True, "dropout": True},
<28> in_axes=(
<29> nn.broadcast,
<30> nn.broadcast,
<31> nn.broadcast,
<32> nn.broadcast,
<33> nn.broadcast,
<34> nn.broadcast,
<35> ),</s>
|
===========below chunk 0===========
<s>):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
)(
self.config,
dtype=self.dtype,
add_norm=self.config.ln_positions == "postln",
name="FlaxBartEncoderLayers",
)(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
init_cache,
output_attentions,
deterministic,
)
hidden_states = hidden_states[0]
else:
for i in range(n_layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# final layernorm on the output of the last layer
# or every 6 layers for Swin v2
add_norm = self.config.ln_positions == "postln" or (
self.config.ln_positions == "swinv2"
and ((i + 1) % 6 == 0)
and (i != n_layers - 1)
)
# we don't need to scale the norm for the last layer
use_scale = i != n_layers - 1
layer_outputs = layer(
self.config,
dtype=self.dtype,
add_norm=add_norm,
use_scale=use_scale,
name=f"FlaxBartDecoderLayer_{i}",
)(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
init_cache,
output_attentions,
deterministic,
)
hidden_states = layer</s>
===========below chunk 1===========
<s>):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 2
<s>mask,
init_cache,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
# postln is already applied in every layer
if self.config.use_final_ln_decoder and self.config.ln_positions != "postln":
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
all_cross_attentions,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartDecoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutputWithPastAndCrossAttentions(**kwargs: _VT)
FlaxBaseModelOutputWithPastAndCrossAttentions(map: Mapping[_KT, _VT], **kwargs: _VT)
FlaxBaseModelOutputWithPastAndCrossAttentions(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
n_layers = self.config.encoder_layers
layer = (
remat(
FlaxBartEncoderLayer,
static_argnums=(2, 3),
prevent_cse=not self.config.use_scan,
)
if self.config.gradient_checkpointing
else FlaxBartEncoderLayer
)
if self.config.use_scan:
# all blocks are the same so we use nn.scan
assert not output_attentions, "cannot scan with output_attentions"
assert not output_hidden_states, "cannot scan with output_hidden_states"
hidden_states = (hidden_states,)
# we use a scale on all norms (even last layer) to allow scanning
hidden_states, _ = nn.scan(
layer,
variable_axes={"params": 0},
split_rngs={"params": True, "dropout": True},
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast),
length=n_layers,
)(
self.config,
dtype=self.dtype,
add_norm=self.config.ln_positions == "postln",
name="FlaxBartEncoderLayers",
)(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = hidden_states[0]
else:
for i in range(n_layers):
if output_hidden_states:
all_hidden_states += (hidden</s>
|
|
dalle_mini.model.modeling/FlaxBartEncoder.__call__
|
Modified
|
borisdayma~dalle-mini
|
b993d27f4707b7fbff7c09c4ae897067f6751956
|
feat: vmap optimizer (#166)
|
<21>:<add> if self.final_ln is None:
<add> final_output = outputs[0]
<add> else:
<add> final_output = self.final_ln(outputs[0])
<add>
<22>:<add> return (final_output,) + outputs[1:]
<del> return outputs
<25>:<add> last_hidden_state=final_output,
<del> last_hidden_state=outputs.last_hidden_state,
|
# module: dalle_mini.model.modeling
class FlaxBartEncoder(nn.Module):
def __call__(
self,
input_ids,
attention_mask,
position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
<0> input_shape = input_ids.shape
<1> input_ids = input_ids.reshape(-1, input_shape[-1])
<2>
<3> hidden_states = self.embed_tokens(input_ids) * self.embed_scale
<4>
<5> if self.config.use_absolute_position_embeddings:
<6> embed_pos = self.embed_positions(position_ids + self.offset)
<7> hidden_states = hidden_states + embed_pos
<8>
<9> hidden_states = self.layernorm_embedding(hidden_states)
<10> hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
<11>
<12> outputs = self.layers(
<13> hidden_states,
<14> attention_mask,
<15> deterministic=deterministic,
<16> output_attentions=output_attentions,
<17> output_hidden_states=output_hidden_states,
<18> return_dict=return_dict,
<19> )
<20>
<21> if not return_dict:
<22> return outputs
<23>
<24> return FlaxBaseModelOutput(
<25> last_hidden_state=outputs.last_hidden_state,
<26> hidden_states=outputs.hidden_states,
<27> attentions=outputs.attentions,
<28> )
<29>
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling.FlaxBartEncoder
config: DalleBartConfig
embed_tokens: nn.Embed
at: dalle_mini.model.modeling.FlaxBartEncoder.setup
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
self.offset = 0
self.embed_positions = nn.Embed(
self.config.max_text_length + self.offset, # image length for BOS
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)
self.final_ln = None
self.final_ln = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)
at: transformers.modeling_flax_outputs
FlaxBaseModelOutput(**kwargs: _VT)
FlaxBaseModelOutput(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
FlaxBaseModelOutput(map: Mapping[_KT, _VT], **kwargs: _VT)
at: transformers.modeling_flax_outputs.FlaxBaseModelOutput
last_hidden_state: jnp.ndarray = None
hidden_states: Optional[Tuple[jnp.ndarray]] = None
attentions: Optional[Tuple[jnp.ndarray]] = None
===========changed ref 0===========
# module: tools.train.train
+ def unsplit_params(data):
+ flat = {}
+ for k in ["standard", "scanned_encoder", "scanned_decoder"]:
+ flat.update(traverse_util.flatten_dict(unfreeze(data[k])))
+ return freeze(traverse_util.unflatten_dict(flat))
+
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
n_layers = self.config.encoder_layers
layer = (
remat(
FlaxBartEncoderLayer,
static_argnums=(2, 3),
prevent_cse=not self.config.use_scan,
)
if self.config.gradient_checkpointing
else FlaxBartEncoderLayer
)
if self.config.use_scan:
# all blocks are the same so we use nn.scan
assert not output_attentions, "cannot scan with output_attentions"
assert not output_hidden_states, "cannot scan with output_hidden_states"
hidden_states = (hidden_states,)
# we use a scale on all norms (even last layer) to allow scanning
hidden_states, _ = nn.scan(
layer,
variable_axes={"params": 0},
split_rngs={"params": True, "dropout": True},
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast),
length=n_layers,
)(
self.config,
dtype=self.dtype,
add_norm=self.config.ln_positions == "postln",
name="FlaxBartEncoderLayers",
)(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = hidden_states[0]
else:
for i in range(n_layers):
if output_hidden_states:
all_hidden_states += (hidden</s>
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
<s>
for i in range(n_layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# final layernorm on the output of the last layer
# or every 6 layers for Swin v2
add_norm = self.config.ln_positions == "postln" or (
self.config.ln_positions == "swinv2"
and ((i + 1) % 6 == 0)
and (i != n_layers - 1)
)
# we don't need to scale the norm for the last layer
use_scale = i != n_layers - 1
layer_outputs = layer(
self.config,
dtype=self.dtype,
add_norm=add_norm,
use_scale=use_scale,
name=f"FlaxBartEncoderLayer_{i}",
)(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
# add hidden states from the last layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
- # postln is already applied in every layer
- if self.config.use_final_ln_encoder and self.config.ln_positions != "postln":
- hidden_states = norm(
- self.config.ln_type,
</s>
===========changed ref 3===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 2
<s> dtype=self.dtype,
- epsilon=1e-05,
- use_scale=self.config.force_ln_scale,
- )(hidden_states)
-
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
===========changed ref 4===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
- @staticmethod
- def unscan(params):
- params = unfreeze(params)
- for l in ["encoder", "decoder"]:
- params["model"][l]["layers"] = jax.tree_map(
- lambda x: {f"{i}": x[i] for i in range(len(x))},
- params["model"][l]["layers"],
- )
- params = freeze(params)
- return params
-
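A small self-contained sketch of the optional final layer-norm pattern introduced above (the norm is applied only when self.final_ln is set); the module below is a toy stand-in for the encoder/decoder.

import flax.linen as nn
import jax
import jax.numpy as jnp

class ToyEncoder(nn.Module):
    use_final_ln: bool = True

    def setup(self):
        self.proj = nn.Dense(8, use_bias=False)
        self.final_ln = nn.LayerNorm(epsilon=1e-05) if self.use_final_ln else None

    def __call__(self, x):
        h = self.proj(x)
        return h if self.final_ln is None else self.final_ln(h)

x = jnp.ones((2, 8))
model = ToyEncoder(use_final_ln=True)
y = model.apply(model.init(jax.random.PRNGKey(0), x), x)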
|
dalle_mini.model.modeling/FlaxBartDecoder.__call__
|
Modified
|
borisdayma~dalle-mini
|
b993d27f4707b7fbff7c09c4ae897067f6751956
|
feat: vmap optimizer (#166)
|
<24>:<add> if self.final_ln is None:
<add> final_output = outputs[0]
<add> else:
<add> final_output = self.final_ln(outputs[0])
<add>
<25>:<add> return (final_output,) + outputs[1:]
<del> return outputs
<28>:<add> last_hidden_state=final_output,
<del> last_hidden_state=outputs.last_hidden_state,
|
<s>Decoder(nn.Module):
def __call__(
self,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
<0> input_shape = input_ids.shape
<1> input_ids = input_ids.reshape(-1, input_shape[-1])
<2>
<3> hidden_states = self.embed_tokens(input_ids) * self.embed_scale
<4>
<5> if self.config.use_absolute_position_embeddings:
<6> embed_pos = self.embed_positions(position_ids + self.offset)
<7> hidden_states = hidden_states + embed_pos
<8>
<9> hidden_states = self.layernorm_embedding(hidden_states)
<10> hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
<11>
<12> outputs = self.layers(
<13> hidden_states,
<14> attention_mask,
<15> encoder_hidden_states,
<16> encoder_attention_mask,
<17> deterministic=deterministic,
<18> init_cache=init_cache,
<19> output_attentions=output_attentions,
<20> output_hidden_states=output_hidden_states,
<21> return_dict=return_dict,
<22> )
<23>
<24> if not return_dict:
<25> return outputs
<26>
<27> return FlaxBaseModelOutputWithPastAndCrossAttentions(
<28> last_hidden_state=outputs.last_hidden_state,
<29> hidden_states=outputs.hidden_states,
<30> attentions=outputs.attentions,
<31> cross_attentions=outputs.cross_attentions,
<32> )
<33>
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartDecoder
config: DalleBartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: dalle_mini.model.modeling.FlaxBartDecoder.setup
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.embed_scale = (
math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
)
self.offset = 0
self.embed_positions = nn.Embed(
self.config.image_length + self.offset, # image length for BOS
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoder(nn.Module):
def __call__(
self,
input_ids,
attention_mask,
position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
input_shape = input_ids.shape
input_ids = input_ids.reshape(-1, input_shape[-1])
hidden_states = self.embed_tokens(input_ids) * self.embed_scale
if self.config.use_absolute_position_embeddings:
embed_pos = self.embed_positions(position_ids + self.offset)
hidden_states = hidden_states + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
outputs = self.layers(
hidden_states,
attention_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
+ if self.final_ln is None:
+ final_output = outputs[0]
+ else:
+ final_output = self.final_ln(outputs[0])
+
if not return_dict:
+ return (final_output,) + outputs[1:]
- return outputs
return FlaxBaseModelOutput(
+ last_hidden_state=final_output,
- last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
===========changed ref 1===========
# module: tools.train.train
+ def unsplit_params(data):
+ flat = {}
+ for k in ["standard", "scanned_encoder", "scanned_decoder"]:
+ flat.update(traverse_util.flatten_dict(unfreeze(data[k])))
+ return freeze(traverse_util.unflatten_dict(flat))
+
===========changed ref 2===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
- @staticmethod
- def unscan(params):
- params = unfreeze(params)
- for l in ["encoder", "decoder"]:
- params["model"][l]["layers"] = jax.tree_map(
- lambda x: {f"{i}": x[i] for i in range(len(x))},
- params["model"][l]["layers"],
- )
- params = freeze(params)
- return params
-
===========changed ref 3===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
- @staticmethod
- def rescan(params):
- params = unfreeze(params)
- for l in ["encoder", "decoder"]:
- params["model"][l]["layers"] = jax.tree_map(
- lambda x: jnp.stack([x[f"{i}"] for i in range(len(x))]),
- params["model"][l]["layers"],
- is_leaf=lambda x: "0" in x,
- )
- params = freeze(params)
- return params
-
===========changed ref 4===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
+ step: int
+ params: core.FrozenDict[str, Any]
+ opt_state: optax.OptState
+ apply_fn: Callable = struct.field(pytree_node=False)
+ tx: optax.GradientTransformation = struct.field(pytree_node=False)
dropout_rng: jnp.ndarray = None
epoch: int = 0
train_time: float = 0.0 # total time the model trained
train_samples: int = 0 # number of samples seen
===========changed ref 5===========
# module: tools.train.train
+ class TrainState(struct.PyTreeNode):
- class TrainState(train_state.TrainState):
@classmethod
def create(cls, *, apply_fn, params, tx, **kwargs):
+ opt_state = {}
+ for k, p in split_params(params).items():
+ init_fn = tx[k].init
+ if "scanned" in k:
+ init_fn = jax.vmap(init_fn)
+ opt_state[k] = init_fn(p)
- opt_state = tx.init(cls.unscan(params))
return cls(
step=0,
apply_fn=apply_fn,
params=params,
tx=tx,
+ opt_state=freeze(opt_state),
- opt_state=opt_state,
**kwargs,
)
|
app.streamlit.backend/get_images_from_backend
|
Modified
|
borisdayma~dalle-mini
|
d6e9f876a356a7684701216b859956581ae4fab3
|
Half the number of requests.
|
<2>:<add> json = r.json()
<add> images = json["images"]
<del> images = r.json()["images"]
<4>:<add> version = json.get("version", "unknown")
<add> return {"images": images, "version": version}
<del> return images
|
# module: app.streamlit.backend
def get_images_from_backend(prompt, backend_url):
<0> r = requests.post(backend_url, json={"prompt": prompt})
<1> if r.status_code == 200:
<2> images = r.json()["images"]
<3> images = [Image.open(BytesIO(base64.b64decode(img))) for img in images]
<4> return images
<5> else:
<6> raise ServiceError(r.status_code)
<7>
|
===========unchanged ref 0===========
at: PIL.Image
open(fp, mode="r", formats=None)
at: base64
b64decode(s: _decodable, altchars: Optional[bytes]=..., validate: bool=...) -> bytes
at: io
BytesIO(initial_bytes: bytes=...)
at: requests.api
post(url: Union[Text, bytes], data: _Data=..., json=..., **kwargs) -> Response
at: requests.models.Response
__attrs__ = [
"_content",
"status_code",
"headers",
"url",
"history",
"encoding",
"reason",
"cookies",
"elapsed",
"request",
]
json(**kwargs) -> Any
at: requests.models.Response.__init__
self.status_code = None
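For reference, a self-contained round-trip sketch of the base64 image encoding that the helper above decodes; the 1x1 image is a toy stand-in for a backend response.

import base64
from io import BytesIO
from PIL import Image

buf = BytesIO()
Image.new("RGB", (1, 1), "white").save(buf, format="PNG")
encoded = base64.b64encode(buf.getvalue()).decode()       # what the backend would send
decoded = Image.open(BytesIO(base64.b64decode(encoded)))  # what the client reconstructs
assert decoded.size == (1, 1)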
|
tools.train.train/split_params
|
Modified
|
borisdayma~dalle-mini
|
209ade7a3a5e94205f8c4f19289890de43da74d4
|
Merge pull request #171 from borisdayma/reduced-requests
|
<10>:<add> # remove empty keys
<add> split = {k: v for k, v in split.items() if v}
|
# module: tools.train.train
def split_params(data):
<0> """Split params between scanned and non-scanned"""
<1> flat = traverse_util.flatten_dict(unfreeze(data))
<2> split = {"standard": {}, "scanned_encoder": {}, "scanned_decoder": {}}
<3> for k, v in flat.items():
<4> if "FlaxBartEncoderLayers" in k:
<5> split["scanned_encoder"][k] = v
<6> elif "FlaxBartDecoderLayers" in k:
<7> split["scanned_decoder"][k] = v
<8> else:
<9> split["standard"][k] = v
<10> for k, v in split.items():
<11> split[k] = freeze(traverse_util.unflatten_dict(v))
<12> return split
<13>
|
===========unchanged ref 0===========
at: tools.train.train.TrainingArguments
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
===========unchanged ref 1===========
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
graft_type: str = field(
default="rmsprop_normalized",
metadata={
"help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'"
},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
shard_shampoo_across: str = field(
default="dp",
metadata={
"help": "Whether to shard the optimizer across data devices (dp), model devices (mp) or both (2d)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
===========unchanged ref 2===========
warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
lr_staircase: bool = field(
default=False,
metadata={
"help": "Whether to use staircase or continuous learning rate when using exponential decay."
},
)
lr_resume_offset: bool = field(
default=False,
metadata={
"help": "Whether to offset the learning rate function with current step when resuming a run."
},
)
logging_steps: int = field(
default=40, metadata={"help": "Log every X updates steps."}
)
eval_steps: int = field(
default=400, metadata={"help": "Run an evaluation every X steps."}
)
save_steps: int = field(
default=4000, metadata={"help": "Save checkpoint every X updates steps."}
)
log_model: bool = field(
default=False,
metadata={"help": "Log model to wandb at `save_steps` frequency."},
)
log_norm_steps: int = field(
default=True,
metadata={"help": "Log parameters and gradients norm at this frequency."},
)
===========unchanged ref 3===========
log_histogram_steps: int = field(
default=False,
metadata={
"help": "Log parameters and gradients histograms at this frequency. Slows down training."
},
)
seed_model: int = field(
default=42,
metadata={
"help": "Random seed for the model that will be set at the beginning of training."
},
)
wandb_entity: Optional[str] = field(
default=None,
metadata={"help": "The wandb entity to use (for teams)."},
)
wandb_project: str = field(
default="dalle-mini",
metadata={"help": "The name of the wandb project."},
)
wandb_job_type: str = field(
default="Seq2Seq",
metadata={"help": "The name of the wandb job type."},
)
assert_TPU_available: bool = field(
default=False,
metadata={"help": "Verify that TPU is not in use."},
)
use_vmap_trick: bool = field(
default=True,
metadata={"help": "Verify that TPU is not in use."},
)
mp_devices: Optional[int] = field(
default=1,
metadata={
"help": "Number of devices required for model parallelism. The other dimension of available devices is used for data parallelism."
},
)
dp_devices: int = field(init=False)
===========changed ref 0===========
# module: dalle_mini
+ __version__ = "0.0.5"
- __version__ = "0.0.4"
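To make the flatten/split/unflatten round trip above concrete, a toy sketch using the same flax utilities; the parameter tree is a made-up stand-in for the real model.

import jax
import jax.numpy as jnp
from flax import traverse_util
from flax.core.frozen_dict import freeze, unfreeze

params = freeze({
    "model": {
        "encoder": {"FlaxBartEncoderLayers": {"kernel": jnp.ones((4, 8, 8))}},
        "lm_head": {"kernel": jnp.ones((8, 16))},
    }
})
flat = traverse_util.flatten_dict(unfreeze(params))  # keys become tuples of path segments
scanned = {k: v for k, v in flat.items() if "FlaxBartEncoderLayers" in k}
standard = {k: v for k, v in flat.items() if "FlaxBartEncoderLayers" not in k}
restored = freeze(traverse_util.unflatten_dict({**standard, **scanned}))
assert jax.tree_util.tree_structure(restored) == jax.tree_util.tree_structure(params)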
|
tools.train.train/unsplit_params
|
Modified
|
borisdayma~dalle-mini
|
209ade7a3a5e94205f8c4f19289890de43da74d4
|
Merge pull request #171 from borisdayma/reduced-requests
|
<2>:<add> if k in data:
<add> flat.update(traverse_util.flatten_dict(unfreeze(data[k])))
<del> flat.update(traverse_util.flatten_dict(unfreeze(data[k])))
|
# module: tools.train.train
def unsplit_params(data):
<0> flat = {}
<1> for k in ["standard", "scanned_encoder", "scanned_decoder"]:
<2> flat.update(traverse_util.flatten_dict(unfreeze(data[k])))
<3> return freeze(traverse_util.unflatten_dict(flat))
<4>
|
===========changed ref 0===========
# module: tools.train.train
def split_params(data):
"""Split params between scanned and non-scanned"""
flat = traverse_util.flatten_dict(unfreeze(data))
split = {"standard": {}, "scanned_encoder": {}, "scanned_decoder": {}}
for k, v in flat.items():
if "FlaxBartEncoderLayers" in k:
split["scanned_encoder"][k] = v
elif "FlaxBartDecoderLayers" in k:
split["scanned_decoder"][k] = v
else:
split["standard"][k] = v
+ # remove empty keys
+ split = {k: v for k, v in split.items() if v}
for k, v in split.items():
split[k] = freeze(traverse_util.unflatten_dict(v))
return split
===========changed ref 1===========
# module: dalle_mini
+ __version__ = "0.0.5"
- __version__ = "0.0.4"
===========changed ref 2===========
# module: tools.train.train
@dataclass
class TrainingArguments:
"""
Arguments pertaining to training parameters.
"""
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for</s>
===========changed ref 3===========
# module: tools.train.train
@dataclass
class TrainingArguments:
# offset: 1
<s> )
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
graft_type: str = field(
default="rmsprop_normalized",
metadata={
"help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'"
},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
shard_shampoo_across: str = field(
default="dp",
metadata={
"help": "Whether to shard the optimizer across data</s>
===========changed ref 4===========
# module: tools.train.train
@dataclass
class TrainingArguments:
# offset: 2
<s>dp), model devices (mp) or both (2d)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
lr_staircase: bool = field(
default=False,
metadata={
"help": "Whether to use staircase or continuous learning rate when using exponential decay."
},
)
+ lr_resume_offset: bool = field(
+ default=False,
+ metadata={
+ "help": "Whether to offset the learning rate function with current step when resuming a run."
+ },
+ )
-
logging_steps: int = field(
default=40, metadata={"help": "Log every X updates steps."}
)
eval_steps: int = field(
default=400, metadata={"help": "Run an evaluation every X steps."}
)
save_steps: int = field(
default=4000, metadata={"help": "Save checkpoint every X updates steps."}
)
log_model: bool = field</s>
|
dalle_mini.model.modeling/FlaxBartEncoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
209ade7a3a5e94205f8c4f19289890de43da74d4
|
Merge pull request #171 from borisdayma/reduced-requests
|
<22>:<add> variable_axes={"params": 0, "cache": 0},
<del> variable_axes={"params": 0},
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> all_hidden_states = () if output_hidden_states else None
<1> all_self_attns = () if output_attentions else None
<2>
<3> n_layers = self.config.encoder_layers
<4> layer = (
<5> remat(
<6> FlaxBartEncoderLayer,
<7> static_argnums=(2, 3),
<8> prevent_cse=not self.config.use_scan,
<9> )
<10> if self.config.gradient_checkpointing
<11> else FlaxBartEncoderLayer
<12> )
<13>
<14> if self.config.use_scan:
<15> # all blocks are the same so we use nn.scan
<16> assert not output_attentions, "cannot scan with output_attentions"
<17> assert not output_hidden_states, "cannot scan with output_hidden_states"
<18> hidden_states = (hidden_states,)
<19> # we use a scale on all norms (even last layer) to allow scanning
<20> hidden_states, _ = nn.scan(
<21> layer,
<22> variable_axes={"params": 0},
<23> split_rngs={"params": True, "dropout": True},
<24> in_axes=(nn.broadcast, nn.broadcast, nn.broadcast),
<25> length=n_layers,
<26> )(
<27> self.config,
<28> dtype=self.dtype,
<29> add_norm=self.config.ln_positions == "postln",
<30> name="FlaxBartEncoderLayers",
<31> )(
<32> hidden_states,
<33> attention_mask,
<34> output_attentions,
<35> deterministic,
<36> )
<37> hidden_states = hidden_</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
else:
for i in range(n_layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# final layernorm on the output of the last layer
# or every 6 layers for Swin v2
add_norm = self.config.ln_positions == "postln" or (
self.config.ln_positions == "swinv2"
and ((i + 1) % 6 == 0)
and (i != n_layers - 1)
)
# we don't need to scale the norm for the last layer
use_scale = i != n_layers - 1
layer_outputs = layer(
self.config,
dtype=self.dtype,
add_norm=add_norm,
use_scale=use_scale,
name=f"FlaxBartEncoderLayer_{i}",
)(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
# add hidden states from the last layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 2
<s>axBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
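
The added "cache": 0 entry tells nn.scan to lay out the decoding cache collection the same way as the scanned params, with one leading layer axis. A minimal, standalone sketch of that stacking behaviour, using hypothetical Block/Stack modules rather than the BART layers:

import jax
import jax.numpy as jnp
import flax.linen as nn

class Block(nn.Module):
    @nn.compact
    def __call__(self, x, mask):
        # x is the scan carry, mask is a broadcast input shared by every layer
        return nn.Dense(8)(x) * mask, None

class Stack(nn.Module):
    n_layers: int = 4

    @nn.compact
    def __call__(self, x, mask):
        x, _ = nn.scan(
            Block,
            variable_axes={"params": 0, "cache": 0},  # stack params (and any cache) along a layer axis
            split_rngs={"params": True},
            in_axes=(nn.broadcast,),
            length=self.n_layers,
        )(name="layers")(x, mask)
        return x

variables = Stack().init(jax.random.PRNGKey(0), jnp.ones((1, 8)), jnp.ones((1, 8)))
# the inner Dense kernel now carries a leading axis of length n_layers
print(variables["params"]["layers"]["Dense_0"]["kernel"].shape)  # (4, 8, 8)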
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartEncoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutput(**kwargs: _VT)
FlaxBaseModelOutput(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
FlaxBaseModelOutput(map: Mapping[_KT, _VT], **kwargs: _VT)
===========changed ref 0===========
# module: dalle_mini
+ __version__ = "0.0.5"
- __version__ = "0.0.4"
===========changed ref 1===========
# module: tools.train.train
def unsplit_params(data):
flat = {}
for k in ["standard", "scanned_encoder", "scanned_decoder"]:
+ if k in data:
+ flat.update(traverse_util.flatten_dict(unfreeze(data[k])))
- flat.update(traverse_util.flatten_dict(unfreeze(data[k])))
return freeze(traverse_util.unflatten_dict(flat))
===========changed ref 2===========
# module: tools.train.train
def split_params(data):
"""Split params between scanned and non-scanned"""
flat = traverse_util.flatten_dict(unfreeze(data))
split = {"standard": {}, "scanned_encoder": {}, "scanned_decoder": {}}
for k, v in flat.items():
if "FlaxBartEncoderLayers" in k:
split["scanned_encoder"][k] = v
elif "FlaxBartDecoderLayers" in k:
split["scanned_decoder"][k] = v
else:
split["standard"][k] = v
+ # remove empty keys
+ split = {k: v for k, v in split.items() if v}
for k, v in split.items():
split[k] = freeze(traverse_util.unflatten_dict(v))
return split
===========changed ref 3===========
# module: tools.train.train
@dataclass
class TrainingArguments:
"""
Arguments pertaining to training parameters.
"""
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for</s>
|
dalle_mini.model.modeling/FlaxBartDecoderLayerCollection.__call__
|
Modified
|
borisdayma~dalle-mini
|
209ade7a3a5e94205f8c4f19289890de43da74d4
|
Merge pull request #171 from borisdayma/reduced-requests
|
<26>:<add> variable_axes={"params": 0, "cache": 0},
<del> variable_axes={"params": 0},
|
<s>Collection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
<0> # decoder layers
<1> all_hidden_states = () if output_hidden_states else None
<2> all_self_attns = () if output_attentions else None
<3> all_cross_attentions = (
<4> () if (output_attentions and encoder_hidden_states is not None) else None
<5> )
<6>
<7> n_layers = self.config.decoder_layers
<8> layer = (
<9> remat(
<10> FlaxBartDecoderLayer,
<11> static_argnums=(4, 5, 6),
<12> prevent_cse=not self.config.use_scan,
<13> )
<14> if self.config.gradient_checkpointing
<15> else FlaxBartDecoderLayer
<16> )
<17>
<18> if self.config.use_scan:
<19> # all blocks are the same so we use nn.scan
<20> assert not output_attentions, "cannot scan with output_attentions"
<21> assert not output_hidden_states, "cannot scan with output_hidden_states"
<22> hidden_states = (hidden_states,)
<23> # we use a scale on all norms (even last layer) to allow scanning
<24> hidden_states, _ = nn.scan(
<25> layer,
<26> variable_axes={"params": 0},
<27> split_rngs={"params": True, "dropout": True},
<28> in_axes=(
<29> nn.broadcast,
<30> nn.broadcast,
<31> nn.broadcast,
<32> nn.broadcast,
<33> nn.broadcast,
<34> nn.broadcast,
<35> ),</s>
|
===========below chunk 0===========
<s>):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 1
)(
self.config,
dtype=self.dtype,
add_norm=self.config.ln_positions == "postln",
name="FlaxBartDecoderLayers",
)(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
init_cache,
output_attentions,
deterministic,
)
hidden_states = hidden_states[0]
else:
for i in range(n_layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
# final layernorm on the output of the last layer
# or every 6 layers for Swin v2
add_norm = self.config.ln_positions == "postln" or (
self.config.ln_positions == "swinv2"
and ((i + 1) % 6 == 0)
and (i != n_layers - 1)
)
# we don't need to scale the norm for the last layer
use_scale = i != n_layers - 1
layer_outputs = layer(
self.config,
dtype=self.dtype,
add_norm=add_norm,
use_scale=use_scale,
name=f"FlaxBartDecoderLayer_{i}",
)(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
init_cache,
output_attentions,
deterministic,
)
hidden_states = layer</s>
===========below chunk 1===========
<s>):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# offset: 2
<s>mask,
init_cache,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [
hidden_states,
all_hidden_states,
all_self_attns,
all_cross_attentions,
]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
remat = nn_partitioning.remat
at: dalle_mini.model.modeling.FlaxBartDecoderLayerCollection
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
at: transformers.modeling_flax_outputs
FlaxBaseModelOutputWithPastAndCrossAttentions(**kwargs: _VT)
FlaxBaseModelOutputWithPastAndCrossAttentions(map: Mapping[_KT, _VT], **kwargs: _VT)
FlaxBaseModelOutputWithPastAndCrossAttentions(iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT)
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayerCollection(nn.Module):
@nn.compact
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
n_layers = self.config.encoder_layers
layer = (
remat(
FlaxBartEncoderLayer,
static_argnums=(2, 3),
prevent_cse=not self.config.use_scan,
)
if self.config.gradient_checkpointing
else FlaxBartEncoderLayer
)
if self.config.use_scan:
# all blocks are the same so we use nn.scan
assert not output_attentions, "cannot scan with output_attentions"
assert not output_hidden_states, "cannot scan with output_hidden_states"
hidden_states = (hidden_states,)
# we use a scale on all norms (even last layer) to allow scanning
hidden_states, _ = nn.scan(
layer,
+ variable_axes={"params": 0, "cache": 0},
- variable_axes={"params": 0},
split_rngs={"params": True, "dropout": True},
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast),
length=n_layers,
)(
self.config,
dtype=self.dtype,
add_norm=self.config.ln_positions == "postln",
name="FlaxBartEncoderLayers",
)(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = hidden_states[0]
else:
for i in range(n_layers):
</s>
|
dalle_mini.model.modeling/FlaxBartPreTrainedModel.num_params
|
Modified
|
borisdayma~dalle-mini
|
6b841551e3cbc1956cc499821081f56af53f0b1a
|
feat(train): use new HF _do_init api
|
<0>:<add> if params is None:
<add> params = self.params
<1>:<add> lambda param: param.size, flatten_dict(unfreeze(params))
<del> lambda param: param.size, flatten_dict(unfreeze(self.params))
|
# module: dalle_mini.model.modeling
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
- @property
+ def num_params(self, params=None):
- def num_params(self):
<0> num_params = jax.tree_map(
<1> lambda param: param.size, flatten_dict(unfreeze(self.params))
<2> ).values()
<3> return sum(list(num_params))
<4>
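
With _do_init=False the parameters live outside the model object, hence the new optional params argument. A quick standalone sketch (toy tree, list comprehension instead of jax.tree_map, not an actual DalleBart checkpoint) of what the count reduces to:

import jax.numpy as jnp
from flax.core.frozen_dict import freeze, unfreeze
from flax.traverse_util import flatten_dict

params = freeze({"encoder": {"kernel": jnp.zeros((3, 4)), "bias": jnp.zeros((4,))},
                 "lm_head": {"kernel": jnp.zeros((4, 2))}})

# same reduction as num_params: sum of leaf sizes over the flattened tree
sizes = [p.size for p in flatten_dict(unfreeze(params)).values()]
print(sum(sizes))  # 3*4 + 4 + 4*2 = 24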
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling.FlaxBartPreTrainedModel
config_class = DalleBartConfig
at: transformers.modeling_flax_utils.FlaxPreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_missing_keys = set()
at: transformers.modeling_flax_utils.FlaxPreTrainedModel.__init__
self.params = random_params
===========changed ref 0===========
# module: tools.train.train
def main():
# See all possible arguments by passing the --help flag to this script.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# check arguments
if training_args.mp_devices > jax.local_device_count():
assert (
data_args.seed_dataset is not None
), "Seed dataset must be provided when model is split over multiple hosts"
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args</s>
===========changed ref 1===========
# module: tools.train.train
def main():
# offset: 1
<s>
# Load dataset
dataset = Dataset(
**asdict(data_args),
do_train=training_args.do_train,
do_eval=training_args.do_eval,
)
logger.info(f"Local TPUs: {jax.local_device_count()}")
logger.info(f"Global TPUs: {jax.device_count()}")
# Set up wandb run
if jax.process_index() == 0:
wandb.init(
entity=training_args.wandb_entity,
project=training_args.wandb_project,
job_type=training_args.wandb_job_type,
config=parser.parse_args(),
)
# Set up our new model config
if model_args.config_name:
config = DalleBartConfig.from_pretrained(model_args.config_name)
config.gradient_checkpointing = training_args.gradient_checkpointing
else:
config = None
# Load or create new model
if model_args.model_name_or_path:
+ model, params = DalleBart.from_pretrained(
- model = DalleBart.from_pretrained(
model_args.model_name_or_path,
config=config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
+ _do_init=False, # we overwrite them with loaded checkpoint
- abstract_init=True, # we overwrite them with loaded checkpoint
gradient_checkpointing=training_args.gradient_checkpointing,
)
else:
model = DalleBart(
config,
seed=training_args.seed_model,
dtype=getattr(jnp, model_args.dtype),
+ </s>
===========changed ref 2===========
# module: tools.train.train
def main():
# offset: 2
<s>_init=False,
+ )
+ params = None
+ params_shape = model.params_shape_tree
- abstract_init=True,
- )
# get model metadata
model_metadata = model_args.get_metadata()
# get PartitionSpec for model params (required to be a dict)
+ param_spec = set_partitions(params_shape, model.config.use_scan)
+ params_shape = freeze(params_shape)
+ if params is not None:
+ params = freeze(params)
- param_spec = set_partitions(model.params, model.config.use_scan)
-
- # convert params to frozen dict
- model._params = freeze(model.params)
# Load tokenizer
tokenizer = DalleBartTokenizer.from_pretrained(
model_args.tokenizer_name, use_fast=True
)
# Preprocessing the datasets.
# We need to normalize and tokenize inputs and targets.
dataset.preprocess(tokenizer=tokenizer, config=model.config)
# Initialize our training
dropout_rng = jax.random.PRNGKey(training_args.seed_model)
# Store some constant
num_epochs = training_args.num_train_epochs
# batch size
batch_size_per_node_per_grad_step = (
training_args.per_device_train_batch_size
* jax.local_device_count()
// training_args.mp_devices
)
batch_size_per_node = (
batch_size_per_node_per_grad_step * training_args.gradient_accumulation_steps
)
batch_size_per_step = batch_size_per_node * jax.process_count()
eval_batch_size_per_node = (
training_args.per_</s>
|
tools.train.train/TrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
acc1a4a30516e13262c75d1fb0c800eb4d4ebbcd
|
feat: allow weight decay
|
<13>:<add> if self.optim == "adafactor" and self.weight_decay == 0:
<add> self.weight_decay = None
|
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
<0> if self.assert_TPU_available:
<1> assert (
<2> jax.local_device_count() == 8
<3> ), "TPUs in use, please check running processes"
<4> if self.output_dir.startswith("gs://"):
<5> assert (
<6> storage is not None
<7> ), 'Could not find google.storage. Install with "pip install google-cloud-storage"'
<8> assert self.optim in [
<9> "distributed_shampoo",
<10> "adam",
<11> "adafactor",
<12> ], f"Selected optimizer not supported: {self.optim}"
<13> assert self.graft_type in [
<14> "rmsprop_normalized",
<15> "rmsprop",
<16> "adagrad",
<17> "adagrad_normalized",
<18> "sgd",
<19> "sqrt_n",
<20> ], f"Selected graft type not supported: {self.graft_type}"
<21> assert self.lr_decay in [
<22> None,
<23> "linear",
<24> "exponential",
<25> ], f"Selected learning rate decay not supported: {self.lr_decay}"
<26> if self.per_device_eval_batch_size is None:
<27> self.per_device_eval_batch_size = self.per_device_train_batch_size
<28> if self.log_norm_steps is True:
<29> self.log_norm_steps = self.logging_steps
<30> if (
<31> os.path.exists(self.output_dir)
<32> and os.listdir(self.output_dir)
<33> and self.do_train
<34> and not self.overwrite_output_dir
<35> ):
<36> raise ValueError(
<37> f"Output directory ({self.output_dir}) already exists and is not empty."
<38> "Use --overwrite_output_dir to overcome."
<39> )
<40> assert self.shard_shampoo_across in [
<41> </s>
|
===========below chunk 0===========
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
# offset: 1
"mp",
"2d",
], f"Shard shampoo across {self.shard_shampoo_across} not supported."
assert (
self.mp_devices > 0
), f"Number of devices for model parallelism must be > 0"
assert (
jax.device_count() % self.mp_devices == 0
), f"Number of available devices ({jax.device_count()} must be divisible by number of devices used for model parallelism ({self.mp_devices})."
self.dp_devices = jax.device_count() // self.mp_devices
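
The new guard turns weight_decay == 0 into None only for adafactor. A plausible reading (an assumption, not stated in the commit) is that optax.adafactor treats weight_decay_rate=None as "no decay transform at all", whereas 0.0 would still append one; a minimal sketch of the distinction:

import optax

def build_adafactor(learning_rate, weight_decay):
    # weight_decay may be None (skip the decay term) or a float rate
    return optax.adafactor(
        learning_rate=learning_rate,
        weight_decay_rate=weight_decay,
    )

opt_no_decay = build_adafactor(1e-3, None)  # what the guard produces for weight_decay == 0
opt_decay = build_adafactor(1e-3, 0.05)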
===========unchanged ref 0===========
at: dataclasses
field(*, default_factory: Callable[[], _T], init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
field(*, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> Any
field(*, default: _T, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
at: os
listdir(path: bytes) -> List[bytes]
listdir(path: int) -> List[str]
listdir(path: Optional[str]=...) -> List[str]
listdir(path: _PathLike[str]) -> List[str]
at: os.path
exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool
at: tools.train.train
storage = None
at: tools.train.train.TrainingArguments
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
===========unchanged ref 1===========
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
weight_decay: float = field(
default=0.0, metadata={"help": "Weight decay applied to parameters."}
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
===========unchanged ref 2===========
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
graft_type: str = field(
default="rmsprop_normalized",
metadata={
"help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'"
},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
shard_shampoo_across: str = field(
default="dp",
metadata={
"help": "Whether to shard the optimizer across data devices (dp), model devices (mp) or both (2d)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
|
tools.train.scalable_shampoo.distributed_shampoo/init_training_metrics
|
Modified
|
borisdayma~dalle-mini
|
79a3849945dbe2c23d2376aa765c88a96ffbd666
|
feat: update shampoo
|
<2>:<add> if not num_statistics:
<add> return TrainingMetrics(jnp.array(0, jnp.float32))
<add> else:
<del> n = 1 if not num_statistics else num_statistics
<3>:<add> return TrainingMetrics(jnp.zeros([num_statistics], jnp.float32))
<del> return TrainingMetrics(jnp.zeros([n], jnp.float32))
|
# module: tools.train.scalable_shampoo.distributed_shampoo
def init_training_metrics(num_statistics):
<0> # Since the downstream apis expect a jnp.array - we create a dummy one if
<1> # num_statistics=0.
<2> n = 1 if not num_statistics else num_statistics
<3> return TrainingMetrics(jnp.zeros([n], jnp.float32))
<4>
|
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.distributed_shampoo
TrainingMetrics()
|
tools.train.scalable_shampoo.distributed_shampoo/init_training_metrics_shapes
|
Modified
|
borisdayma~dalle-mini
|
79a3849945dbe2c23d2376aa765c88a96ffbd666
|
feat: update shampoo
|
<2>:<add> if not num_statistics:
<del> n = 1 if not num_statistics else num_statistics
<3>:<add> return TrainingMetrics([[], jnp.float32])
<del> return TrainingMetrics([[n], jnp.float32])
<4>:<add> else:
<add> return TrainingMetrics([[num_statistics], jnp.float32])
|
# module: tools.train.scalable_shampoo.distributed_shampoo
def init_training_metrics_shapes(num_statistics):
<0> # Since the downstream apis expect a jnp.array - we create a dummy one if
<1> # num_statistics=0.
<2> n = 1 if not num_statistics else num_statistics
<3> return TrainingMetrics([[n], jnp.float32])
<4>
|
===========changed ref 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def init_training_metrics(num_statistics):
# Since the downstream apis expect a jnp.array - we create a dummy one if
# num_statistics=0.
+ if not num_statistics:
+ return TrainingMetrics(jnp.array(0, jnp.float32))
+ else:
- n = 1 if not num_statistics else num_statistics
+ return TrainingMetrics(jnp.zeros([num_statistics], jnp.float32))
- return TrainingMetrics(jnp.zeros([n], jnp.float32))
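
Both helpers now agree on the degenerate case: a scalar metric (shape ()) when there are no statistics, and a vector of length num_statistics otherwise, instead of the old dummy length-1 array. A quick shape check with a stand-in NamedTuple for TrainingMetrics:

from typing import NamedTuple
import jax.numpy as jnp

class TrainingMetrics(NamedTuple):  # stand-in for the real flax.struct dataclass
    inverse_pth_root_errors: jnp.ndarray

def init_training_metrics(num_statistics):
    if not num_statistics:
        return TrainingMetrics(jnp.array(0, jnp.float32))  # scalar, shape ()
    return TrainingMetrics(jnp.zeros([num_statistics], jnp.float32))

print(init_training_metrics(0).inverse_pth_root_errors.shape)  # ()
print(init_training_metrics(3).inverse_pth_root_errors.shape)  # (3,)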
|
tools.train.scalable_shampoo.distributed_shampoo/matrix_inverse_pth_root
|
Modified
|
borisdayma~dalle-mini
|
79a3849945dbe2c23d2376aa765c88a96ffbd666
|
feat: update shampoo
|
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
<0> """Computes `matrix^(-1/p)`, where `p` is a positive integer.
<1>
<2> This function uses the coupled Newton iterations algorithm for
<3> the computation of a matrix's inverse pth root.
<4>
<5>
<6> References:
<7> [Functions of Matrices, Theory and Computation,
<8> Nicholas J Higham, Pg 184, Eq 7.18](
<9> https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
<10>
<11> Args:
<12> matrix: the symmetric PSD matrix whose power is to be computed
<13> p: exponent, for p a positive integer.
<14> num_iters: Maximum number of iterations.
<15> ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
<16> error_tolerance: Error indicator, useful for early termination.
<17> precision: precision XLA related flag, the available options are: a)
<18> lax.Precision.DEFAULT (better step time, but not precise) b)
<19> lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
<20> (best possible precision, slowest)
<21>
<22> Returns:
<23> matrix^(-1/p)
<24> """
<25>
<26> # If the input is not square, materialize it from the concatenated form.
<27> if matrix.shape[0] != matrix.shape[1]:
<28> matrix = symmetric_matrices.materialize_matrix_from_concat(matrix)
<29>
<30> assert matrix.shape[0] == matrix.shape[1]
<31>
<32> # We use _MAT_INV_PTH_ROOT_DTYPE for the matrix inverse pth root.
<33> # Switch</s>
|
===========below chunk 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 1
# jax_enable_x64 for this to work.
matrix_size = matrix.shape[0]
orig_dtype = matrix.dtype
matrix = matrix.astype(_MAT_INV_PTH_ROOT_DTYPE)
alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE)
identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE)
_, max_ev = power_iteration(
matrix=matrix, num_iters=100, error_tolerance=1e-6, precision=precision
)
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-6)
def _iter_condition(state):
(i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, run_step) = state
error_above_threshold = jnp.logical_and(error > error_tolerance, run_step)
return jnp.logical_and(i < num_iters, error_above_threshold)
def _iter_body(state):
(i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state
mat_m_i = (1 - alpha) * identity + alpha * mat_m
new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision)
new_error = jnp.max(jnp.abs(new_mat_m -</s>
===========below chunk 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 2
<s>m_i, precision=precision)
new_error = jnp.max(jnp.abs(new_mat_m - identity))
# sometimes error increases after an iteration before decreasing and
# converging. 1.2 factor is used to bound the maximal allowed increase.
return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, new_error < error * 1.2)
if matrix_size == 1:
resultant_mat_h = (matrix + ridge_epsilon) ** alpha
error = 0
else:
damped_matrix = matrix + ridge_epsilon * identity
z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix))
new_mat_m_0 = damped_matrix * z
new_error = jnp.max(jnp.abs(new_mat_m_0 - identity))
new_mat_h_0 = identity * jnp.power(z, 1.0 / p)
init_state = tuple([0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True])
_, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop(
_iter_condition, _iter_body, init_state
)
error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32)
is_converged = jnp.as</s>
===========below chunk 2===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 3
<s>convergence, old_mat_h.dtype)
resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h
resultant_mat_h = jnp.asarray(resultant_mat_h, orig_dtype)
return resultant_mat_h, error
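
The docstring above describes coupled Newton iterations for matrix^(-1/p). A simple way to sanity-check any such routine, independent of this implementation, is to compare against an eigendecomposition on a small symmetric positive definite matrix; the sketch below is an illustration of that check, not part of the repository:

import jax.numpy as jnp

def inverse_pth_root_via_eigh(matrix, p):
    # reference result for a symmetric PSD matrix: M^(-1/p) = V diag(w^(-1/p)) V^T
    w, v = jnp.linalg.eigh(matrix)
    return (v * jnp.power(w, -1.0 / p)) @ v.T

m = jnp.array([[4.0, 1.0], [1.0, 3.0]])
root = inverse_pth_root_via_eigh(m, 2)  # M^(-1/2)
# applying the root twice recovers the inverse: root @ root is approximately M^(-1)
print(jnp.allclose(root @ root, jnp.linalg.inv(m), atol=1e-5))  # True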
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.distributed_shampoo
_MAT_INV_PTH_ROOT_DTYPE = jnp.float64
power_iteration(matrix, num_iters=100, error_tolerance=1e-6, precision=lax.Precision.HIGHEST)
mat_power(mat_m, p, precision=lax.Precision.HIGHEST)
at: tools.train.scalable_shampoo.distributed_shampoo.mat_power
power = jnp.eye(mat_m.shape[0], dtype=_MAT_INV_PTH_ROOT_DTYPE)
_iter_condition(state)
_iter_body(state)
at: tools.train.scalable_shampoo.symmetric_matrices.symmetric_matrices
materialize_matrix_from_concat(block_rows_concat, num_blocks=None)
===========changed ref 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def init_training_metrics_shapes(num_statistics):
# Since the downstream apis expect a jnp.array - we create a dummy one if
# num_statistics=0.
+ if not num_statistics:
- n = 1 if not num_statistics else num_statistics
+ return TrainingMetrics([[], jnp.float32])
- return TrainingMetrics([[n], jnp.float32])
+ else:
+ return TrainingMetrics([[num_statistics], jnp.float32])
===========changed ref 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def init_training_metrics(num_statistics):
# Since the downstream apis expect a jnp.array - we create a dummy one if
# num_statistics=0.
+ if not num_statistics:
+ return TrainingMetrics(jnp.array(0, jnp.float32))
+ else:
- n = 1 if not num_statistics else num_statistics
+ return TrainingMetrics(jnp.zeros([num_statistics], jnp.float32))
- return TrainingMetrics(jnp.zeros([n], jnp.float32))
|
|
tools.train.scalable_shampoo.distributed_shampoo/_add_error_into_local_stats
|
Modified
|
borisdayma~dalle-mini
|
79a3849945dbe2c23d2376aa765c88a96ffbd666
|
feat: update shampoo
|
<3>:<add> if local_stat.sizes:
<add> index_start = int(local_stat.index_start)
<del> index_start = int(local_stat.index_start)
<4>:<add> index_end = int(len(local_stat.sizes)) + index_start
<del> index_end = int(len(local_stat.sizes)) + index_start
<5>:<add> per_stat_error = errors[index_start:index_end]
<del> per_stat_error = errors[index_start:index_end]
<6>:<add> else:
<add> per_stat_error = jnp.array(0, jnp.float32)
|
# module: tools.train.scalable_shampoo.distributed_shampoo
def _add_error_into_local_stats(local_stats, errors, inverse_failure_threshold):
<0> """Adds errors back into local statistics."""
<1> new_local_stats = []
<2> for local_stat in local_stats:
<3> index_start = int(local_stat.index_start)
<4> index_end = int(len(local_stat.sizes)) + index_start
<5> per_stat_error = errors[index_start:index_end]
<6> if local_stat.sizes:
<7> per_stat_error = jnp.where(
<8> jnp.logical_and(
<9> per_stat_error > 0.0, per_stat_error != inverse_failure_threshold
<10> ),
<11> per_stat_error,
<12> local_stat.training_metrics.inverse_pth_root_errors,
<13> )
<14> new_local_stats.append(
<15> LocalShardedParameterStats(
<16> local_stat.diagonal_statistics,
<17> local_stat.diagonal_momentum,
<18> local_stat.momentum,
<19> TrainingMetrics(per_stat_error),
<20> local_stat.index_start,
<21> local_stat.sizes,
<22> )
<23> )
<24> return new_local_stats
<25>
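
Each local_stat owns the slice errors[index_start : index_start + len(sizes)] of the globally gathered error vector; the new branch skips the slice entirely when a parameter contributed no statistics (sizes is empty). A toy illustration of the layout, with hypothetical numbers:

import jax.numpy as jnp

errors = jnp.array([0.1, 0.2, 0.3, 0.4, 0.5])  # errors gathered across all preconditioners

# (index_start, sizes) per parameter; the middle parameter has no statistics at all
local_layout = [(0, [128, 128]), (2, []), (2, [256, 256, 256])]

for index_start, sizes in local_layout:
    if sizes:
        per_stat_error = errors[index_start : index_start + len(sizes)]  # e.g. [0.1, 0.2]
    else:
        per_stat_error = jnp.array(0, jnp.float32)  # matches init_training_metrics(0)
    print(per_stat_error)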
|
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.distributed_shampoo
LocalShardedParameterStats()
at: tools.train.scalable_shampoo.distributed_shampoo.LocalShardedParameterStats
diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner
diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner
momentum: QuantizedValue # Momentum for the shampoo preconditioner
training_metrics: TrainingMetrics # Metrics (optional for training).
index_start: np.int32 = struct.field(
pytree_node=False
) # Index into global statistics array
sizes: Any = struct.field(pytree_node=False) # Sizes of the statistics.
at: tools.train.scalable_shampoo.distributed_shampoo.TrainingMetrics
inverse_pth_root_errors: chex.Array # Error for inverse-pth roots.
===========changed ref 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def init_training_metrics_shapes(num_statistics):
# Since the downstream apis expect a jnp.array - we create a dummy one if
# num_statistics=0.
+ if not num_statistics:
- n = 1 if not num_statistics else num_statistics
+ return TrainingMetrics([[], jnp.float32])
- return TrainingMetrics([[n], jnp.float32])
+ else:
+ return TrainingMetrics([[num_statistics], jnp.float32])
===========changed ref 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def init_training_metrics(num_statistics):
# Since the downstream apis expect a jnp.array - we create a dummy one if
# num_statistics=0.
+ if not num_statistics:
+ return TrainingMetrics(jnp.array(0, jnp.float32))
+ else:
- n = 1 if not num_statistics else num_statistics
+ return TrainingMetrics(jnp.zeros([num_statistics], jnp.float32))
- return TrainingMetrics(jnp.zeros([n], jnp.float32))
===========changed ref 2===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
"""Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the coupled Newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
matrix: the symmetric PSD matrix whose power is to be computed
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
Returns:
matrix^(-1/p)
"""
# If the input is not square, materialize it from the concatenated form.
if matrix.shape[0] != matrix.shape[1]:
matrix = symmetric_matrices.materialize_matrix_from_concat(matrix)
assert matrix.shape[0] == matrix.shape[1]
# We use _MAT_INV_PTH_ROOT_DTYPE for the matrix inverse pth root.
# Switch to f64 if you have hardware that supports it. Enable the jax flag
# jax_enable_x64 for this to work.
matrix_size</s>
===========changed ref 3===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
):
# offset: 1
<s> you have hardware that supports it. Enable the jax flag
# jax_enable_x64 for this to work.
matrix_size = matrix.shape[0]
orig_dtype = matrix.dtype
matrix = matrix.astype(_MAT_INV_PTH_ROOT_DTYPE)
alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE)
identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE)
_, max_ev = power_iteration(
matrix=matrix, num_iters=100, error_tolerance=1e-6, precision=precision
)
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-6)
def _iter_condition(state):
(i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, run_step) = state
error_above_threshold = jnp.logical_and(error > error_tolerance, run_step)
return jnp.logical_and(i < num_iters, error_above_threshold)
def _iter_body(state):
(i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state
mat_m_i = (1 - alpha) * identity + alpha * mat_m
new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
new_mat</s>
|
tools.train.train/ModelArguments.get_metadata
|
Modified
|
borisdayma~dalle-mini
|
c3e93dfa329b1086d6c738ec5f2a36b4ccd77a46
|
feat: support LR offset (#174)
|
<0>:<add> if ":" in self.model_name_or_path:
<del> if self.restore_state:
|
# module: tools.train.train
@dataclass
class ModelArguments:
def get_metadata(self):
<0> if self.restore_state:
<1> if jax.process_index() == 0:
<2> artifact = wandb.run.use_artifact(self.model_name_or_path)
<3> else:
<4> artifact = wandb.Api().artifact(self.model_name_or_path)
<5> return artifact.metadata
<6> else:
<7> return dict()
<8>
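
The ":" test distinguishes a W&B artifact reference, which always ends in a version or alias suffix, from a plain local path or hub model id. A small illustration with generic, made-up names:

def is_wandb_artifact(name: str) -> bool:
    # W&B artifact references look like "entity/project/model-xyz:v3" or "...:latest"
    return ":" in name

print(is_wandb_artifact("entity/project/model-xyz:latest"))  # True  -> fetch artifact metadata
print(is_wandb_artifact("facebook/bart-large"))              # False -> return dict()
print(is_wandb_artifact("./local-checkpoint"))               # False -> return dict()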
|
===========unchanged ref 0===========
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path."
},
)
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
===========unchanged ref 1===========
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
===========unchanged ref 2===========
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
===========unchanged ref 3===========
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
use_artifact(self, artifact_or_name: Union[str, public.Artifact, Artifact], type: Optional[str]=None, aliases: Optional[List[str]]=None, use_as: Optional[str]=None) -> Union[public.Artifact, Artifact]
|
tools.train.train/TrainingArguments.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
65bb95f06b74e4472a1d023c771d7a5748121c9d
|
feat: allow eval only
|
<32>:<add> if not self.do_train:
<add> self.num_train_epochs = 1
|
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
<0> if self.assert_TPU_available:
<1> assert (
<2> jax.local_device_count() == 8
<3> ), "TPUs in use, please check running processes"
<4> if self.output_dir.startswith("gs://"):
<5> assert (
<6> storage is not None
<7> ), 'Could not find google.storage. Install with "pip install google-cloud-storage"'
<8> assert self.optim in [
<9> "distributed_shampoo",
<10> "adam",
<11> "adafactor",
<12> ], f"Selected optimizer not supported: {self.optim}"
<13> if self.optim == "adafactor" and self.weight_decay == 0:
<14> self.weight_decay = None
<15> assert self.graft_type in [
<16> "rmsprop_normalized",
<17> "rmsprop",
<18> "adagrad",
<19> "adagrad_normalized",
<20> "sgd",
<21> "sqrt_n",
<22> ], f"Selected graft type not supported: {self.graft_type}"
<23> assert self.lr_decay in [
<24> None,
<25> "linear",
<26> "exponential",
<27> ], f"Selected learning rate decay not supported: {self.lr_decay}"
<28> if self.per_device_eval_batch_size is None:
<29> self.per_device_eval_batch_size = self.per_device_train_batch_size
<30> if self.log_norm_steps is True:
<31> self.log_norm_steps = self.logging_steps
<32> if (
<33> os.path.exists(self.output_dir)
<34> and os.listdir(self.output_dir)
<35> and self.do_train
<36> and not self.overwrite_output_dir
<37> ):
<38> raise ValueError(
<39> f"Output directory ({self.output_dir}) already exists and is not empty."
<40> "Use --overwrite</s>
|
===========below chunk 0===========
# module: tools.train.train
@dataclass
class TrainingArguments:
def __post_init__(self):
# offset: 1
)
assert self.shard_shampoo_across in [
"dp",
"mp",
"2d",
], f"Shard shampoo across {self.shard_shampoo_across} not supported."
assert (
self.mp_devices > 0
), f"Number of devices for model parallelism must be > 0"
assert (
jax.device_count() % self.mp_devices == 0
), f"Number of available devices ({jax.device_count()} must be divisible by number of devices used for model parallelism ({self.mp_devices})."
self.dp_devices = jax.device_count() // self.mp_devices
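
The data parallel dimension is simply whatever remains after reserving mp_devices per model replica; worked numbers assuming an 8-device host:

# 8 devices in total, 2 reserved per replica for model parallelism
device_count, mp_devices = 8, 2
assert device_count % mp_devices == 0
dp_devices = device_count // mp_devices  # 4 data parallel groups
print(dp_devices)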
===========unchanged ref 0===========
at: os
listdir(path: bytes) -> List[bytes]
listdir(path: int) -> List[str]
listdir(path: Optional[str]=...) -> List[str]
listdir(path: _PathLike[str]) -> List[str]
at: os.path
exists(path: Union[AnyStr, _PathLike[AnyStr]]) -> bool
at: tools.train.train
storage = None
at: tools.train.train.TrainingArguments
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
===========unchanged ref 1===========
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
weight_decay: float = field(
default=0.0, metadata={"help": "Weight decay applied to parameters."}
)
beta1: float = field(
default=0.9,
metadata={"help": "Beta1 for Adam & Distributed Shampoo."},
)
beta2: float = field(
default=0.999,
metadata={"help": "Beta2 for for Adam & Distributed Shampoo."},
)
adam_epsilon: float = field(
default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}
)
max_grad_norm: float = field(
default=1.0, metadata={"help": "Max gradient norm for Adafactor."}
)
block_size: int = field(
default=1024,
metadata={"help": "Chunked size for large layers with Distributed Shampoo."},
)
preconditioning_compute_steps: int = field(
default=10, metadata={"help": "Number of steps to update preconditioner."}
)
skip_preconditioning_dim_size_gt: int = field(
default=4096,
metadata={"help": "Max size for preconditioning with Distributed Shampoo."},
)
graft_type: str = field(
default="rmsprop_normalized",
metadata={
"help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'"
},
)
===========unchanged ref 2===========
nesterov: bool = field(
default=False,
metadata={"help": "Use Nesterov momentum for Distributed Shampoo."},
)
optim_quantized: bool = field(
default=False,
metadata={
"help": "Whether to quantize optimizer (only supported with Distributed Shampoo)."
},
)
shard_shampoo_across: str = field(
default="dp",
metadata={
"help": "Whether to shard the optimizer across data devices (dp), model devices (mp) or both (2d)."
},
)
num_train_epochs: int = field(
default=3, metadata={"help": "Total number of training epochs to perform."}
)
warmup_steps: int = field(
default=0, metadata={"help": "Linear warmup over warmup_steps."}
)
lr_decay: str = field(
default=None,
metadata={
"help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential."
},
)
lr_transition_steps: int = field(
default=None,
metadata={
"help": "Number of transition steps associated with learning rate decay when using exponential decay."
},
)
lr_decay_rate: float = field(
default=None,
metadata={
"help": "Decay rate associated with learning rate when using exponential decay."
},
)
lr_staircase: bool = field(
default=False,
metadata={
"help": "Whether to use staircase or continuous learning rate when using exponential decay."
},
)
lr_offset: int = field(
default=0,
metadata={"help": "Number of steps to offset learning rate and keep it at 0."},
)
logging_steps: int = field(
default=40, metadata={"help": "Log every X updates steps."}
)
|
tools.train.train/ModelArguments.get_metadata
|
Modified
|
borisdayma~dalle-mini
|
89bc9d4b7fe9715028880bb69d144563fffd660b
|
fix(train): overwrite dropout only when specified
|
<0>:<add> if self.model_name_or_path is not None and ":" in self.model_name_or_path:
<del> if ":" in self.model_name_or_path:
|
# module: tools.train.train
@dataclass
class ModelArguments:
def get_metadata(self):
<0> if ":" in self.model_name_or_path:
<1> if jax.process_index() == 0:
<2> artifact = wandb.run.use_artifact(self.model_name_or_path)
<3> else:
<4> artifact = wandb.Api().artifact(self.model_name_or_path)
<5> return artifact.metadata
<6> else:
<7> return dict()
<8>
|
===========unchanged ref 0===========
at: tools.train.train.ModelArguments
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. "
"Don't set if you want to train a model from scratch. "
"W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`."
},
)
config_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained config name or path if not the same as model_name_or_path"
},
)
tokenizer_name: Optional[str] = field(
default=None,
metadata={
"help": "Pretrained tokenizer name or path if not the same as model_name_or_path"
},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`."
},
)
restore_state: Optional[bool] = field(
default=False,
metadata={
"help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path."
},
)
dropout: Optional[float] = field(
default=None,
metadata={"help": "Dropout rate. Overwrites config."},
)
activation_dropout: Optional[float] = field(
default=None,
metadata={"help": "Activation dropout rate. Overwrites config."},
)
attention_dropout: Optional[float] = field(
default=None,
metadata={"help": "Attention dropout rate. Overwrites config."},
)
at: wandb
Api = PublicApi
run: Optional["wandb_sdk.wandb_run.Run"] = None
===========unchanged ref 1===========
at: wandb.apis.public.Api
_HTTP_TIMEOUT = env.get_http_timeout(9)
VIEWER_QUERY = gql(
"""
query Viewer{
viewer {
id
flags
entity
username
email
admin
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
"""
)
USERS_QUERY = gql(
"""
query SearchUsers($query: String) {
users(query: $query) {
edges {
node {
id
flags
entity
admin
email
deletedAt
username
apiKeys {
edges {
node {
id
name
description
}
}
}
teams {
edges {
node {
name
}
}
}
}
}
}
}
"""
)
CREATE_PROJECT = gql(
"""
mutation upsertModel(
$description: String
$entityName: String
$id: String
$name: String
$framework: String
$access: String
$views: JSONString
) {
upsertModel(
input: {
description: $description
entityName: $entityName
id: $id
name: $name
framework: $framework
access: $access
views: $views
}
) {
project {
id
name
entityName
description
access
views
}
model {
id
name
entityName
description
access
views
}
inserted
}
}
"""
)
artifact(name, type=None)
===========unchanged ref 2===========
at: wandb.apis.public.Artifact
QUERY = gql(
"""
query ArtifactWithCurrentManifest(
$id: ID!,
) {
artifact(id: $id) {
currentManifest {
id
file {
id
directUrl
}
}
...ArtifactFragment
}
}
%s
"""
% ARTIFACT_FRAGMENT
)
at: wandb.sdk.wandb_run.Run
_telemetry_obj: telemetry.TelemetryRecord
_telemetry_obj_active: bool
_telemetry_obj_dirty: bool
_telemetry_obj_flushed: bytes
_teardown_hooks: List[TeardownHook]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_internal_run_interface: Optional[
Union[
"wandb.sdk.interface.interface_queue.InterfaceQueue",
"wandb.sdk.interface.interface_grpc.InterfaceGrpc",
]
]
_wl: Optional[_WandbSetup]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_redirect_raw_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_quiet: Optional[bool]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
===========unchanged ref 3===========
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_check_version: Optional["CheckVersionResponse"]
_sampled_history: Optional["SampledHistoryResponse"]
_final_summary: Optional["GetSummaryResponse"]
_poll_exit_handle: Optional[MailboxHandle]
_poll_exit_response: Optional[PollExitResponse]
_server_info_response: Optional[ServerInfoResponse]
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_artifact_slots: List[str]
_init_pid: int
_attach_pid: int
_iface_pid: Optional[int]
_iface_port: Optional[int]
_attach_id: Optional[str]
_is_attached: bool
_settings: Settings
_launch_artifacts: Optional[Dict[str, Any]]
_printer: Union["PrinterTerm", "PrinterJupyter"]
use_artifact(self, artifact_or_name: Union[str, public.Artifact, Artifact], type: Optional[str]=None, aliases: Optional[List[str]]=None, use_as: Optional[str]=None) -> Union[public.Artifact, Artifact]
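The added None check above avoids a membership test on None; a minimal illustration (values are hypothetical):

model_name_or_path = None
# ":" in model_name_or_path  # would raise TypeError: argument of type 'NoneType' is not iterable
if model_name_or_path is not None and ":" in model_name_or_path:
    print("W&B artifact reference, e.g. entity/project/name:version")
else:
    print("no artifact reference -> empty metadata dict")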
|
dalle_mini.data/Dataset.__post_init__
|
Modified
|
borisdayma~dalle-mini
|
3109050d47c8c9db7963436519efdc6292d5d8a5
|
feat: support multi validation datasets (#192)
|
<35>:<add> # multiple validation datasets
<add> if self.multi_eval_ds:
<add> assert Path(
<add> self.dataset_repo_or_path
<add> ).is_dir(), f"{self.dataset_repo_or_path} is not a directory, required for multi_eval_ds"
<add> data_files = {
<add> split.name: [str(f) for f in split.glob("*.parquet")]
<add> for split in Path(self.dataset_repo_or_path).glob("*")
<add> }
<add> # rename "valid" to "validation" if present for consistency
<add> if "valid" in data_files:
<add> data_files["validation"] = data_files["valid"]
<add> del data_files["valid"]
<add> self.dataset_repo_or_path = "parquet"
<add>
|
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
<0> if self.seed_dataset is None:
<1> # create a random seed
<2> self.seed_dataset = random.randint(0, 2**32 - 1)
<3> # set numpy rng
<4> self.np_rng = np.random.default_rng(self.seed_dataset)
<5> self.multi_hosts = jax.process_count() > 1
<6> # feed blank captions only in streaming mode for now
<7> # otherwise dataset could be cached with same blanked captions
<8> if self.blank_caption_prob:
<9> assert (
<10> self.streaming is True
<11> ), "blank_caption_prob can only be used in streaming mode"
<12> # define data_files
<13> if self.train_file is not None or self.validation_file is not None:
<14> # accept braceexpand notation
<15> for k in ["train_file", "validation_file"]:
<16> f = getattr(self, k)
<17> if isinstance(f, str):
<18> setattr(self, k, list(braceexpand(f)))
<19> # for list of files, split training data shards by host
<20> if (
<21> isinstance(self.train_file, list)
<22> and self.multi_hosts
<23> and self.shard_by_host
<24> ):
<25> self.train_file = self.train_file[
<26> jax.process_index() :: jax.process_count()
<27> ]
<28> data_files = {
<29> "train": self.train_file,
<30> "validation": self.validation_file,
<31> }
<32> else:
<33> data_files = None
<34>
<35> # load dataset
<36> dataset = load_dataset(
<37> self.dataset_repo_or_path,
<38> data_files=data_files,
<39> streaming=self.streaming,
<40> use_auth_token=self.use_auth_token,
<41> )
<42> if self.do_train:</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def __post_init__(self):
# offset: 1
raise ValueError("Training requires a training dataset")
self.train_dataset = dataset["train"]
if self.max_train_samples is not None:
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
if self.do_eval:
if "validation" not in dataset:
raise ValueError("Evaluating requires a validation dataset")
self.eval_dataset = dataset["validation"]
if self.max_eval_samples is not None:
self.eval_dataset = (
self.eval_dataset.take(self.max_eval_samples)
if self.streaming
else self.eval_dataset.select(range(self.max_eval_samples))
)
===========unchanged ref 0===========
at: dalle_mini.data.Dataset
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
blank_caption_prob: float = 0.0
clip_score_column: str = "clip_score"
min_clip_score: float = None
max_clip_score: float = None
filter_column: str = None
filter_value: str = None
multi_eval_ds: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
other_eval_datasets: list = field(init=False)
at: dataclasses
field(*, default_factory: Callable[[], _T], init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
field(*, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> Any
field(*, default: _T, init: bool=..., repr: bool=..., hash: Optional[bool]=..., compare: bool=..., metadata: Optional[Mapping[str, Any]]=...) -> _T
===========unchanged ref 1===========
at: datasets.load
load_dataset(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, split: Optional[Union[str, Split]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[DownloadMode]=None, ignore_verifications: bool=False, keep_in_memory: Optional[bool]=None, save_infos: bool=False, revision: Optional[Union[str, Version]]=None, use_auth_token: Optional[Union[bool, str]]=None, task: Optional[Union[str, TaskTemplate]]=None, streaming: bool=False, num_proc: Optional[int]=None, *, num_process: int=1, process_id: int=0, seed: Optional[int]=None, experiment_id: Optional[str]=None, max_concurrent_cache_files: int=10000, timeout: Union[int, float]=100, base_path: Optional[str]=None, info: Optional[DatasetInfo]=None, repo_id: Optional[str]=None, **kwargs) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]
at: numpy.random._generator
default_rng(seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator=...) -> Generator
at: pathlib
Path()
at: pathlib.Path
__slots__ = ()
glob(pattern: str) -> Generator[_P, None, None]
is_dir() -> bool
at: pathlib.PurePath
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
===========unchanged ref 2===========
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
at: random
randint = _inst.randint
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
blank_caption_prob: float = 0.0
clip_score_column: str = "clip_score"
min_clip_score: float = None
max_clip_score: float = None
filter_column: str = None
filter_value: str = None
+ multi_eval_ds: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
+ other_eval_datasets: list = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
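A minimal sketch of the directory layout assumed by multi_eval_ds above: one sub-folder per split, each containing parquet shards ("dataset_dir" is a placeholder path):

from pathlib import Path

dataset_dir = Path("dataset_dir")
data_files = {
    split.name: [str(f) for f in split.glob("*.parquet")]
    for split in dataset_dir.glob("*")
}
# keep the "validation" name used elsewhere in the pipeline
if "valid" in data_files:
    data_files["validation"] = data_files.pop("valid")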
|
dalle_mini.data/Dataset.dataloader
|
Modified
|
borisdayma~dalle-mini
|
3109050d47c8c9db7963436519efdc6292d5d8a5
|
feat: support multi validation datasets (#192)
|
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(self, split, batch_size, epoch=None):
<0> def _dataloader_datasets_non_streaming(
<1> dataset: Dataset,
<2> rng: jax.random.PRNGKey = None,
<3> ):
<4> """
<5> Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
<6> Shuffle batches if rng is set.
<7> """
<8> steps_per_epoch = len(dataset) // batch_size
<9>
<10> if rng is not None:
<11> batch_idx = jax.random.permutation(rng, len(dataset))
<12> else:
<13> batch_idx = jnp.arange(len(dataset))
<14>
<15> batch_idx = batch_idx[
<16> : steps_per_epoch * batch_size
<17> ] # Skip incomplete batch.
<18> batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
<19>
<20> for idx in batch_idx:
<21> batch = dataset[idx]
<22> batch = {k: jnp.array(v) for k, v in batch.items()}
<23> yield batch
<24>
<25> def _dataloader_datasets_streaming(
<26> dataset: Dataset,
<27> epoch: int,
<28> ):
<29> keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"]
<30> batch = {k: [] for k in keys}
<31> first_loop = True # stop after one loop in some cases
<32> while (self.multi_hosts and split == "train") or first_loop:
<33> # in multi-host, we run forever (no epoch) as hosts need to stop
<34> # at the same time and training data may not be split equally
<35> # For validation data we put the entire batch on each host and then
<36> # keep only the one specific to each host (could be improved but not necessary)
<37> if epoch is not None:
<38> assert split == "train</s>
|
===========below chunk 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
def dataloader(self, split, batch_size, epoch=None):
# offset: 1
# reshuffle training data at each epoch
dataset.set_epoch(epoch)
epoch += 1
for item in dataset:
for k in keys:
batch[k].append(item[k])
if len(batch[keys[0]]) == batch_size:
batch = {k: jnp.array(v) for k, v in batch.items()}
yield batch
batch = {k: [] for k in keys}
first_loop = False
if split == "train":
ds = self.train_dataset
elif split == "eval":
ds = self.eval_dataset
else:
raise ValueError(f'split must be "train" or "eval", got {split}')
if self.streaming:
return _dataloader_datasets_streaming(ds, epoch)
else:
if split == "train":
self.rng_dataset, input_rng = jax.random.split(self.rng_dataset)
return _dataloader_datasets_non_streaming(ds, input_rng)
===========unchanged ref 0===========
at: dalle_mini.data
self.other_eval_datasets = {
split: (
ds.map(
partial_preprocess_function,
batched=True,
remove_columns=[
self.text_column,
self.encoding_column,
],
)
if self.streaming
else ds.map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
)
for split, ds in self.other_eval_datasets.items()
}
data_files = {
split.name: [str(f) for f in split.glob("*.parquet")]
for split in Path(self.dataset_repo_or_path).glob("*")
}
self.other_eval_datasets = {
split: (
ds.map(
partial_preprocess_function,
batched=True,
remove_columns=[
self.text_column,
self.encoding_column,
],
)
if self.streaming
else ds.map(
partial_preprocess_function,
batched=True,
remove_columns=getattr(ds, "column_names"),
num_proc=self.preprocessing_num_workers,
load_from_cache_file=not self.overwrite_cache,
desc="Preprocessing datasets",
)
)
for split, ds in self.other_eval_datasets.items()
}
preprocess_function(examples, tokenizer, text_column, encoding_column, max_length, decoder_start_token_id)
at: dalle_mini.data.Dataset
streaming: bool = True
text_column: str = "caption"
encoding_column: str = "encoding"
preprocessing_num_workers: int = None
===========unchanged ref 1===========
overwrite_cache: bool = False
at: dalle_mini.data.Dataset.__post_init__
self.seed_dataset = random.randint(0, 2**32 - 1)
self.np_rng = np.random.default_rng(self.seed_dataset)
self.train_dataset = dataset["train"]
self.train_dataset = (
self.train_dataset.take(self.max_train_samples)
if self.streaming
else self.train_dataset.select(range(self.max_train_samples))
)
self.other_eval_datasets = {
split: dataset[split] for split in other_eval_splits
}
at: dalle_mini.data.Dataset.preprocess
decoder_start_token_id = config.decoder_start_token_id
max_length = config.max_text_length
partial_blank_caption_function = partial(
blank_caption_function,
text_column=self.text_column,
blank_caption_prob=self.blank_caption_prob,
rng=self.np_rng,
)
===========unchanged ref 2===========
at: datasets.arrow_dataset.Dataset
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=None, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, suffix_template: str="_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str]=None, desc: Optional[str]=None) -> "Dataset"
at: datasets.dataset_dict.DatasetDict
map(function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: bool=True, cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, desc: Optional[str]=None) -> "DatasetDict"
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
===========changed ref 0===========
# module: dalle_mini.data
@dataclass
class Dataset:
dataset_repo_or_path: str
train_file: str = None
validation_file: str = None
streaming: bool = True
use_auth_token: bool = False
text_column: str = "caption"
encoding_column: str = "encoding"
max_train_samples: int = None
max_eval_samples: int = None
preprocessing_num_workers: int = None
overwrite_cache: bool = False
do_train: bool = False
do_eval: bool = True
seed_dataset: int = None
shard_by_host: bool = False
blank_caption_prob: float = 0.0
clip_score_column: str = "clip_score"
min_clip_score: float = None
max_clip_score: float = None
filter_column: str = None
filter_value: str = None
+ multi_eval_ds: bool = False
train_dataset: Dataset = field(init=False)
eval_dataset: Dataset = field(init=False)
+ other_eval_datasets: list = field(init=False)
rng_dataset: jnp.ndarray = field(init=False)
multi_hosts: bool = field(init=False)
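A toy illustration of the non-streaming batching above (shuffle indices, drop the incomplete tail, reshape into steps x batch_size); the sizes are made up:

import jax

n_examples, batch_size = 10, 4
rng = jax.random.PRNGKey(0)

batch_idx = jax.random.permutation(rng, n_examples)
steps_per_epoch = n_examples // batch_size               # 2
batch_idx = batch_idx[: steps_per_epoch * batch_size]    # the 2 leftover examples are skipped
batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
print(batch_idx.shape)  # (2, 4)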
|
|
dalle_mini.model.text/remove_html_tags
|
Modified
|
borisdayma~dalle-mini
|
f698aa3b9a7a645e0f103d583a8b3a69ce63a871
|
fix(processor): html tags should create a space
|
<0>:<add> return re.sub("<[^<]+?>", " ", t)
<del> return re.sub("<[^<]+?>", "", t)
|
# module: dalle_mini.model.text
def remove_html_tags(t):
<0> return re.sub("<[^<]+?>", "", t)
<1>
|
===========unchanged ref 0===========
at: re
sub(pattern: AnyStr, repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: AnyStr, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
sub(pattern: Pattern[AnyStr], repl: AnyStr, string: AnyStr, count: int=..., flags: _FlagsType=...) -> AnyStr
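A quick check of why the replacement string is now a space: words separated only by a tag would otherwise be glued together:

import re

t = "first line<br>second line"
print(re.sub("<[^<]+?>", "", t))   # 'first linesecond line'
print(re.sub("<[^<]+?>", " ", t))  # 'first line second line'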
|
dalle_mini.model.modeling/DalleBart.num_params
|
Modified
|
borisdayma~dalle-mini
|
dd740e6d84bbb24769878698b1800e1304d27800
|
feat: train embedding layers only (#291)
|
<2>:<add> num_params = jax.tree_util.tree_map(
<del> num_params = jax.tree_map(
|
# module: dalle_mini.model.modeling
class DalleBart(PretrainedFromWandbMixin, FlaxBartForConditionalGeneration):
def num_params(self, params=None):
<0> if params is None:
<1> params = self.params
<2> num_params = jax.tree_map(
<3> lambda param: param.size, flatten_dict(unfreeze(params))
<4> ).values()
<5> return sum(list(num_params))
<6>
|
===========unchanged ref 0===========
at: dalle_mini.model.modeling.DalleBart
module_class = FlaxBartForConditionalGenerationModule
config_class = DalleBartConfig
at: transformers.modeling_flax_utils.FlaxPreTrainedModel
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_missing_keys = set()
at: transformers.modeling_flax_utils.FlaxPreTrainedModel.__init__
self.params = random_params
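A minimal sketch (not the project's code) of the parameter-counting pattern used in num_params above:

import jax
import jax.numpy as jnp
from flax.core.frozen_dict import freeze, unfreeze
from flax.traverse_util import flatten_dict

params = freeze({"dense": {"kernel": jnp.ones((4, 8)), "bias": jnp.ones((8,))}})  # toy tree
sizes = jax.tree_util.tree_map(lambda p: p.size, flatten_dict(unfreeze(params)))
print(sum(sizes.values()))  # 40 = 4*8 + 8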
===========changed ref 0===========
# module: tools.train.train
try:
from google.cloud import storage
except:
storage = None
- cc.initialize_cache("./jax_cache", max_cache_size_bytes=10 * 2**30)
-
logger = logging.getLogger(__name__)
+ cc.initialize_cache("jax_cache")
+
===========changed ref 1===========
# module: tools.train.train
- class TrainState(struct.PyTreeNode):
- step: int
- params: core.FrozenDict[str, Any]
- opt_state: optax.OptState
- apply_fn: Callable = struct.field(pytree_node=False)
- tx: optax.GradientTransformation = struct.field(pytree_node=False)
- dropout_rng: jnp.ndarray = None
- epoch: int = 0
- train_time: float = 0.0 # total time the model trained
- train_samples: int = 0 # number of samples seen
-
===========changed ref 2===========
# module: tools.train.train
+ def trainable_params(data, embeddings_only):
+ """Keep only trainable parameters"""
+
+ if not embeddings_only:
+ return data
+
+ data = unfreeze(data)
+ trainable = {
+ "lm_head": data["lm_head"],
+ "model": {
+ "decoder": {
+ layer: data["model"]["decoder"][layer]
+ for layer in [
+ "embed_positions",
+ "embed_tokens",
+ "final_ln",
+ "layernorm_embedding",
+ ]
+ }
+ },
+ }
+ return freeze(trainable)
+
===========changed ref 3===========
# module: tools.train.train
- class TrainState(struct.PyTreeNode):
- @classmethod
- def create(cls, *, apply_fn, params, tx, **kwargs):
- opt_state = {}
- for k, p in split_params(params).items():
- init_fn = tx[k].init
- if "scanned" in k:
- init_fn = jax.vmap(init_fn)
- opt_state[k] = init_fn(p)
- return cls(
- step=0,
- apply_fn=apply_fn,
- params=params,
- tx=tx,
- opt_state=freeze(opt_state),
- **kwargs,
- )
-
===========changed ref 4===========
# module: tools.train.train
+ def init_embeddings(model, params):
+ """Reinitialize trainable embeddings"""
+ # Must match params in trainable_params() above
+ trainable_keypaths = [
+ "lm_head.kernel",
+ "model.decoder.embed_positions.embedding",
+ "model.decoder.embed_tokens.embedding",
+ "model.decoder.final_ln.bias",
+ "model.decoder.layernorm_embedding.bias",
+ "model.decoder.layernorm_embedding.scale",
+ ]
+
+ # Note: using private _missing_keys
+ init_keys = {tuple(k.split(".")) for k in trainable_keypaths}
+ model._missing_keys = init_keys
+ return model.init_weights(model.key, model.input_shape, params=params)
+
===========changed ref 5===========
# module: tools.train.train
- class TrainState(struct.PyTreeNode):
- def apply_gradients(self, *, grads, **kwargs):
- grads = split_params(grads)
- params = split_params(self.params)
- opt_state = {}
- # we loop over keys: "standard", "scanned_encoder", "scanned_decoder"
- for k, param in params.items():
- update_fn = self.tx[k].update
- if "scanned" in k:
- update_fn = jax.vmap(update_fn, in_axes=(0, 0, 0), out_axes=(0, 0))
- updates, new_opt_state = update_fn(grads[k], self.opt_state[k], param)
- params[k] = optax.apply_updates(param, updates)
- opt_state[k] = new_opt_state
- params = unsplit_params(params)
-
- return self.replace(
- step=self.step + 1,
- params=params,
- opt_state=freeze(opt_state),
- **kwargs,
- )
-
===========changed ref 6===========
# module: tools.train.train
@dataclass
class TrainingArguments:
"""
Arguments pertaining to training parameters.
"""
output_dir: str = field(
metadata={
"help": "The output directory where the model predictions and checkpoints will be written."
},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(
default=False, metadata={"help": "Whether to run eval on the validation set."}
)
per_device_train_batch_size: int = field(
default=8,
metadata={"help": "Batch size per data parallel device for training."},
)
per_device_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Batch size per data parallel device for evaluation. Same as training batch size if not set."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={
"help": "Number of updates steps to accumulate before performing an update pass."
},
)
gradient_checkpointing: bool = field(
default=False, metadata={"help": "Use gradient checkpointing."}
)
learning_rate: float = field(
default=5e-5, metadata={"help": "The initial learning rate."}
)
optim: str = field(
default="distributed_shampoo",
metadata={
"help": 'The optimizer to use. Can be "distributed_shampoo" (default), "adam" or "adafactor"'
},
)
weight_decay: float = field(
default=0.0, metadata={"help": "Weight decay applied to parameters</s>
|
dalle_mini.model.modeling/dot_product_attention_weights
|
Modified
|
borisdayma~dalle-mini
|
f05d82b9c474a27f9def742ce97c1d265a2952e4
|
fix: swin v2 scaling (thanks @lucidrains) (#297)
|
<11>:<del> # calculate attention matrix
<12>:<del> depth = query.shape[-1]
<13>:<del> query = query / jnp.sqrt(depth).astype(dtype)
<16>:<add>
<add> # divide by tau (used in Swin v2)
<add> if tau is not None:
<add> attn_weights = attn_weights / tau
<add> else:
<add> depth = query.shape[-1]
<add> attn_weights = attn_weights / jnp.sqrt(depth).astype(dtype)
|
<s> = None,
mask: Optional[Any] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
is_encoder: bool = False,
+ tau=None,
):
<0> """
<1> Computes dot-product attention weights given query and key.
<2> mask is included into the bias.
<3>
<4> Adapted from flax.linen.attention.dot_product_attention_weights"
<5> """
<6> assert query.ndim == key.ndim, "q, k must have same rank."
<7> assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
<8> assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
<9> assert query.shape[-1] == key.shape[-1], "q, k depths must match."
<10>
<11> # calculate attention matrix
<12> depth = query.shape[-1]
<13> query = query / jnp.sqrt(depth).astype(dtype)
<14> # attn weight shape is (batch..., num_heads, q_length, kv_length)
<15> attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
<16>
<17> # apply attention bias: masking, dropout, proximity bias, etc.
<18> if bias is not None:
<19> attn_weights = attn_weights + bias
<20>
<21> # add relative position
<22> if embed_pos is not None:
<23> attn_weights = attn_weights + embed_pos
<24>
<25> # normalize the attention weights
<26> if not is_encoder or sinkhorn_iters == 1:
<27> # sinkhorn does not work for causal (leaks info</s>
|
===========below chunk 0===========
<s> mask: Optional[Any] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
is_encoder: bool = False,
+ tau=None,
):
# offset: 1
attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
else:
# adapted from https://github.com/lucidrains/sinkhorn-transformer
for i in range(sinkhorn_iters):
# when causal, some attn_weights have been set to -inf through bias
if i % 2 == 0:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-1, keepdims=True)
else:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-2, keepdims=True)
if mask is not None:
attn_weights = jnp.where(mask, attn_weights, -jnp.inf)
attn_weights = jnp.exp(attn_weights).astype(dtype)
# apply attention dropout
if not deterministic and dropout_rate > 0.0:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across the batch + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
</s>
===========below chunk 1===========
<s> mask: Optional[Any] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
is_encoder: bool = False,
+ tau=None,
):
# offset: 2
<s> = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
)
attn_weights = attn_weights * multiplier
return attn_weights
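A toy illustration of the scaling change above: with a learned tau the raw scores are divided by the temperature, otherwise by sqrt(depth) as before (shapes and the tau value below are made up):

import jax.numpy as jnp

q = jnp.ones((1, 16, 8, 64))  # (batch, length, heads, depth)
k = jnp.ones((1, 16, 8, 64))
scores = jnp.einsum("...qhd,...khd->...hqk", q, k)

tau = jnp.full((1, 8, 1, 1), 0.5)                # assumed per-head temperature
scaled_swin_v2 = scores / tau                    # Swin v2 / cosine-style scaling
scaled_default = scores / jnp.sqrt(q.shape[-1])  # standard 1/sqrt(depth) scaling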
|
dalle_mini.model.modeling/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
f05d82b9c474a27f9def742ce97c1d265a2952e4
|
fix: swin v2 scaling (thanks @lucidrains) (#297)
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> if self.head_dim * self.num_heads != self.embed_dim:
<2> raise ValueError(
<3> f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
<4> f" and `num_heads`: {self.num_heads})."
<5> )
<6>
<7> dense = partial(
<8> nn.Dense,
<9> self.embed_dim,
<10> use_bias=self.bias,
<11> dtype=self.dtype,
<12> )
<13>
<14> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<15> self.config
<16> )
<17>
<18> self.q_proj = dense(
<19> kernel_init=deepnet_init()
<20> if self.config.use_deepnet_scaling
<21> else jax.nn.initializers.normal(self.config.init_std)
<22> )
<23> self.k_proj = dense(
<24> kernel_init=deepnet_init()
<25> if self.config.use_deepnet_scaling
<26> else jax.nn.initializers.normal(self.config.init_std)
<27> )
<28> self.v_proj = dense(
<29> kernel_init=deepnet_init(gain)
<30> if self.config.use_deepnet_scaling
<31> else jax.nn.initializers.normal(self.config.init_std)
<32> )
<33> self.out_proj = dense(
<34> kernel_init=deepnet_init(gain)
<35> if self.config.use_deepnet_scaling
<36> else jax.nn.initializers.normal(self.config.init_std)
<37> )
<38> self.dropout_layer = nn.Dropout(rate=self.dropout)
<39>
<40> if</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
# offset: 1
self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
)
if self.config.use_cosine_attention:
self.tau = self.param(
"tau",
jax.nn.initializers.constant(self.config.tau_init),
(1, self.num_heads, 1, 1),
)
if self.config.use_swin_position_embeddings:
self.rel_bias = nn.Embed(
self.q_length,
self.k_length * self.num_heads,
embedding_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self) -> None
===========changed ref 0===========
<s> = None,
mask: Optional[Any] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
is_encoder: bool = False,
+ tau=None,
):
"""
Computes dot-product attention weights given query and key.
mask is included into the bias.
Adapted from flax.linen.attention.dot_product_attention_weights"
"""
assert query.ndim == key.ndim, "q, k must have same rank."
assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
assert query.shape[-1] == key.shape[-1], "q, k depths must match."
- # calculate attention matrix
- depth = query.shape[-1]
- query = query / jnp.sqrt(depth).astype(dtype)
# attn weight shape is (batch..., num_heads, q_length, kv_length)
attn_weights = jnp.einsum("...qhd,...khd->...hqk", query, key, precision=precision)
+
+ # divide by tau (used in Swin v2)
+ if tau is not None:
+ attn_weights = attn_weights / tau
+ else:
+ depth = query.shape[-1]
+ attn_weights = attn_weights / jnp.sqrt(depth).astype(dtype)
# apply attention bias: masking, dropout, proximity bias, etc.
if bias is not None:
attn_weights = attn_weights + bias
# add relative position
if embed_pos is not None:
</s>
===========changed ref 1===========
<s> mask: Optional[Any] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
is_encoder: bool = False,
+ tau=None,
):
# offset: 1
<s> attn_weights = attn_weights + bias
# add relative position
if embed_pos is not None:
attn_weights = attn_weights + embed_pos
# normalize the attention weights
if not is_encoder or sinkhorn_iters == 1:
# sinkhorn does not work for causal (leaks info of future tokens into past)
attn_weights = jax.nn.softmax(attn_weights).astype(dtype)
else:
# adapted from https://github.com/lucidrains/sinkhorn-transformer
for i in range(sinkhorn_iters):
# when causal, some attn_weights have been set to -inf through bias
if i % 2 == 0:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-1, keepdims=True)
else:
attn_weights -= jax.nn.logsumexp(attn_weights, axis=-2, keepdims=True)
if mask is not None:
attn_weights = jnp.where(mask, attn_weights, -jnp.inf)
attn_weights = jnp.exp(attn_weights).astype(dtype)
# apply attention dropout
if not deterministic and dropout_rate > 0.0:
keep_prob = 1.0 - dropout_rate
if broadcast_dropout:
# dropout is broadcast across</s>
===========changed ref 2===========
<s> mask: Optional[Any] = None,
embed_pos: Optional[Any] = None,
broadcast_dropout: bool = True,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.0,
deterministic: bool = False,
dtype: Any = jnp.float32,
precision: PrecisionLike = None,
sinkhorn_iters: int = 1,
is_encoder: bool = False,
+ tau=None,
):
# offset: 2
<s> + head dimensions
dropout_shape = tuple([1] * (key.ndim - 2)) + attn_weights.shape[-2:]
keep = jax.random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = jax.random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(
keep_prob, dtype=dtype
)
attn_weights = attn_weights * multiplier
return attn_weights
|
|
dalle_mini.model.modeling/FlaxBartAttention.__call__
|
Modified
|
borisdayma~dalle-mini
|
f05d82b9c474a27f9def742ce97c1d265a2952e4
|
fix: swin v2 scaling (thanks @lucidrains) (#297)
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> """Input shape: Batch x Time x Channel"""
<1>
<2> # if key_value_states are provided this layer is used as a cross-attention layer
<3> # for the decoder
<4> is_cross_attention = key_value_states is not None
<5> batch_size = hidden_states.shape[0]
<6>
<7> # get query proj
<8> query_states = self.q_proj(hidden_states)
<9> # get key, value proj
<10> if is_cross_attention:
<11> # cross_attentions
<12> key_states = self.k_proj(key_value_states)
<13> value_states = self.v_proj(key_value_states)
<14> else:
<15> # self_attention
<16> key_states = self.k_proj(hidden_states)
<17> value_states = self.v_proj(hidden_states)
<18>
<19> query_states = self._split_heads(query_states)
<20> key_states = self._split_heads(key_states)
<21> value_states = self._split_heads(value_states)
<22>
<23> # handle cache prepare causal attention mask
<24> if self.causal:
<25> query_length, key_length = query_states.shape[1], key_states.shape[1]
<26> if self.has_variable("cache", "cached_key"):
<27> mask_shift = self.variables["cache"]["cache_index"]
<28> max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
<29> causal_mask = lax.dynamic_slice(
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> # Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -jnp.inf).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
# relative position embeddings
if self.config.use_swin_position_embeddings:
position_ids = jnp.arange(self.q_length)
embed_pos = self.rel_bias(position_ids)
embed_pos = rearrange(embed_pos, "q (k h) -> 1 h q k", h=self.num_heads)
else:
</s>
===========below chunk 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>_pos = None
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
mask=attention_mask,
embed_pos=embed_pos,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
sinkhorn_iters=self.config.sinkhorn_iters,
is_encoder=self.is_encoder,
)
if self.config.use_cosine_attention:
# divide by tau
attn_weights = attn_weights / jnp.maximum(self.tau, 0.01)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
if self.config.use_head_scale:
# per Normformer
attn_output = attn_output * self.head_scale
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
===========unchanged ref 0===========
at: dalle_mini.model.modeling
dot_product_attention_weights(query: Any, key: Any, bias: Optional[Any]=None, mask: Optional[Any]=None, embed_pos: Optional[Any]=None, broadcast_dropout: bool=True, dropout_rng: Optional[PRNGKey]=None, dropout_rate: float=0.0, deterministic: bool=False, dtype: Any=jnp.float32, precision: PrecisionLike=None, sinkhorn_iters: int=1, is_encoder: bool=False, tau=None)
at: dalle_mini.model.modeling.FlaxBartAttention.setup
self.q_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
dropout: float = 0.0
causal: bool = False
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
_split_heads(hidden_states)
_concatenate_to_cache(key, value, query, attention_mask)
|
|
tools.train.scalable_shampoo.sm3/sm3
|
Modified
|
borisdayma~dalle-mini
|
9952d4c883ad07c127f58108fae778badcd3c18f
|
feat: update shampoo
|
# module: tools.train.scalable_shampoo.sm3
def sm3(
learning_rate, beta1=0.9, beta2=0.999, diagonal_epsilon=1e-10, normalize_grads=False
):
<0> """SM3 optimizer.
<1>
<2> Memory-Efficient Adaptive Optimization, Rohan Anil, Vineet Gupta, Tomer Koren,
<3> Yoram Singer
<4>
<5> https://arxiv.org/abs/1901.11150
<6>
<7> Args:
<8> learning_rate: the step size used to update the parameters.
<9> beta1: momentum parameter.
<10> beta2: second moment averaging parameter.
<11> diagonal_epsilon: epsilon for sm3
<12> normalize_grads: Whether to normalize grads. Author finds it useful when
<13> grads are high variance.
<14>
<15> Returns:
<16> a GradientTransformation.
<17> """
<18>
<19> def _quantize_momentum(momentum_statistics):
<20> return QuantizedValue.from_float_value(momentum_statistics, jnp.int8)
<21>
<22> def init_fn(params):
<23> """Initialise the optimiser's state."""
<24>
<25> def _init(param):
<26> accumulators = [jnp.zeros([s]) for s in param.shape]
<27> momentum = _quantize_momentum(jnp.zeros_like(param))
<28> return ParameterStats(accumulators, momentum)
<29>
<30> return SM3State(
<31> count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params)
<32> )
<33>
<34> def _get_expanded_shape(shape, i):
<35> rank = len(shape)
<36> # Replaces a `shape` of [M, N, K] with 1 in all dimensions except for i.
<37> # For eg: i = 1 returns [1, N, 1].
<38> return [1] * i + [shape[i]] + [1] * (rank - i - 1)
</s>
|
===========below chunk 0===========
# module: tools.train.scalable_shampoo.sm3
def sm3(
learning_rate, beta1=0.9, beta2=0.999, diagonal_epsilon=1e-10, normalize_grads=False
):
# offset: 1
def _moving_averages(grad, accumulators):
w = (1.0 - beta2) if beta2 != 1.0 else 1.0
if grad.ndim < 2:
return beta2 * accumulators[0] + w * grad**2
else:
min_accumulator = functools.reduce(jnp.minimum, accumulators)
return beta2 * min_accumulator + w * grad**2
def _moving_averages_momentum(grad, momentum):
w = (1.0 - beta1) if beta1 != 1.0 else 1.0
return beta1 * momentum.to_float() + w * grad
def _sketch_diagonal_statistics(grad, updated_diagonal_statistics):
all_diagonal_statistics = []
for i in range(grad.ndim):
axes = list(range(i)) + list(range(i + 1, grad.ndim))
dim_diagonal_statistics = jnp.max(updated_diagonal_statistics, axis=axes)
all_diagonal_statistics.append(dim_diagonal_statistics)
if grad.ndim == 1:
all_diagonal_statistics[0] = updated_diagonal_statistics
return all_diagonal_statistics
def update_fn(updates, state, params=None):
del params
stats = state.stats
if normalize_grads:
updates = jax.tree_map(lambda g: g / (jnp.linalg.norm(g) + 1e-16), updates)
# Reshape all vectors into N-d tensors to compute min over them.
# [n], [m] -> [n, 1], [1, m]
expanded_diagonal_statistics = jax.tree_multimap(
lambda grad, state: [ # pylint:disable=g-long-lambda
jnp</s>
===========below chunk 1===========
# module: tools.train.scalable_shampoo.sm3
def sm3(
learning_rate, beta1=0.9, beta2=0.999, diagonal_epsilon=1e-10, normalize_grads=False
):
# offset: 2
<s> jax.tree_multimap(
lambda grad, state: [ # pylint:disable=g-long-lambda
jnp.reshape(
state.diagonal_statistics[i], _get_expanded_shape(grad.shape, i)
)
for i in range(grad.ndim)
],
updates,
stats,
)
# Compute new diagonal statistics
new_diagonal_statistics = jax.tree_multimap(
_moving_averages, updates, expanded_diagonal_statistics
)
# Compute preconditioners (1/sqrt(s)) where s is the statistics.
new_preconditioners = jax.tree_map(
lambda t: 1.0 / jnp.sqrt(t + diagonal_epsilon), new_diagonal_statistics
)
preconditioned_grads = jax.tree_multimap(
lambda g, p: g * p, updates, new_preconditioners
)
# Compute updated momentum (also handle quantization)
updated_momentum = jax.tree_multimap(
lambda preconditioned_grad, state: _moving_averages_momentum( # pylint:disable=g-long-lambda
preconditioned_grad, state.diagonal_momentum
),
preconditioned_grads,
stats,
)
# Update diagonal statistics.
updated_diagonal_statistics = jax.tree_multimap(
_sketch_diagonal_statistics, updates, new_diagonal_statistics
)
# Update momentum.
new_sm3_stats = jax.tree_multimap(
lambda momentum, diagonal_stats: ParameterStats( # pylint:disable=g-long-lambda
</s>
===========below chunk 2===========
# module: tools.train.scalable_shampoo.sm3
def sm3(
learning_rate, beta1=0.9, beta2=0.999, diagonal_epsilon=1e-10, normalize_grads=False
):
# offset: 3
<s>stats, _quantize_momentum(momentum)
),
updated_momentum,
updated_diagonal_statistics,
)
lr = learning_rate
if callable(learning_rate):
lr = learning_rate(state.count)
new_updates = jax.tree_map(lambda pg: -lr * pg, updated_momentum)
return new_updates, SM3State(count=state.count + 1, stats=new_sm3_stats)
return optax.GradientTransformation(init_fn, update_fn)
===========unchanged ref 0===========
at: functools
reduce(function, iterable, initial=None, /) -> value
reduce(function: Callable[[_T, _S], _T], sequence: Iterable[_S], initial: _T) -> _T
reduce(function: Callable[[_T, _T], _T], sequence: Iterable[_T]) -> _T
at: tools.train.scalable_shampoo.quantization_utils
QuantizedValue()
at: tools.train.scalable_shampoo.quantization_utils.QuantizedValue
quantized: chex.Array
diagonal: chex.Array # Diagonal (if extract_diagonal is set)
bucket_size: chex.Array
quantized_dtype: jnp.dtype = struct.field(
pytree_node=False
) # Dtype for the quantized value.
extract_diagonal: bool = struct.field(pytree_node=False) # In case its centered.
shape: Any = struct.field(pytree_node=False) # Shape of the tensor.
from_float_value(fvalue, quantized_dtype, extract_diagonal=False)
at: tools.train.scalable_shampoo.sm3
SM3State(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
ParameterStats(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
|
|
tools.train.scalable_shampoo.distributed_shampoo/BlockPartitioner.__init__
|
Modified
|
borisdayma~dalle-mini
|
9952d4c883ad07c127f58108fae778badcd3c18f
|
feat: update shampoo
|
<16>:<del> self._num_splits = len(split_sizes)
<17>:<del> self._preconditioner_shapes = []
<18>:<del> for t in itertools.product(*split_sizes):
<19>:<del> self._preconditioner_shapes.extend([[d, d] for d in t])
<20>:<add> self._split_sizes = split_sizes
|
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
def __init__(self, param, block_size):
<0> self._shape = param.shape
<1> self._splits = []
<2> split_sizes = []
<3> # We split params into smaller blocks. Here we store the metadata to make
<4> # that split.
<5> for i, d in enumerate(param.shape):
<6> if 0 < block_size < d:
<7> # d-1, otherwise split appends a 0-size array.
<8> nsplit = (d - 1) // block_size
<9> indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size
<10> sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size
<11> sizes[-1] = d - indices[-1]
<12> self._splits.append((i, indices))
<13> split_sizes.append(sizes)
<14> else:
<15> split_sizes.append(np.array([d], dtype=np.int32))
<16> self._num_splits = len(split_sizes)
<17> self._preconditioner_shapes = []
<18> for t in itertools.product(*split_sizes):
<19> self._preconditioner_shapes.extend([[d, d] for d in t])
<20>
|
===========unchanged ref 0===========
at: itertools
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5], iter6: Iterable[_T6]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5, _T6]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3]) -> Iterator[Tuple[_T1, _T2, _T3]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2]) -> Iterator[Tuple[_T1, _T2]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4]) -> Iterator[Tuple[_T1, _T2, _T3, _T4]]
product(*iterables: Iterable[_T1], repeat: int) -> Iterator[Tuple[_T1, ...]]
product(iter1: Iterable[_T1]) -> Iterator[Tuple[_T1]]
product(*iterables: Iterable[Any], repeat: int=...) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[Any], iter2: Iterable[Any], iter3: Iterable[Any], iter4: Iterable[Any], iter5: Iterable[Any], iter6: Iterable[Any], iter7: Iterable[Any], *iterables: Iterable[Any]) -> Iterator[Tuple[Any, ...]]
product(iter1: Iterable[_T1], iter2: Iterable[_T2], iter3: Iterable[_T3], iter4: Iterable[_T4], iter5: Iterable[_T5]) -> Iterator[Tuple[_T1, _T2, _T3, _T4, _T5]]
at: numpy
int32 = signedinteger[_32Bit]
at: numpy.core._multiarray_umath
arange(start=None, stop, step=None, , dtype=None, *, like=None, /)
===========unchanged ref 1===========
array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, like=None, /)
_DTypeMeta(dtype, align=False, copy=False, metadata=None, /)
at: numpy.core.numeric
ones(shape: _ShapeLike, dtype: None=..., order: _OrderCF=..., *, like: _SupportsArrayFunc=...) -> NDArray[float64]
ones(shape: _ShapeLike, dtype: DTypeLike, order: _OrderCF=..., *, like: _SupportsArrayFunc=...) -> NDArray[Any]
ones(shape: _ShapeLike, dtype: _DTypeLike[_SCT], order: _OrderCF=..., *, like: _SupportsArrayFunc=...) -> NDArray[_SCT]
===========changed ref 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ class PreconditionerType(enum.IntEnum):
+ # Default, computes preconditioner for each dim
+ ALL = 1
+ # One sided Shampoo, in this cases only on input dim.
+ # Assumes last dim is always the output dim and everything else input dim.
+ INPUT = 2
+
===========changed ref 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ def _pth_root_difference(w, alpha, beta, p):
+ """Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p)."""
+
+ a = w + alpha
+ b = w + beta
+ a_minus_b = alpha - beta
+ exp = -1 / p
+
+ def _stable_subtract(b, a_minus_b):
+ # Mathematically identical to the target expression, with (w+beta)^(-1/p)
+ # term factored out and w cancellation in the subtraction.
+ return (b**exp) * jnp.expm1(exp * jnp.log1p(a_minus_b / b))
+
+ return jnp.where(
+ # Choose the branch with the best log1p approximation.
+ jnp.abs(a_minus_b / b) < jnp.abs(a_minus_b / a),
+ -_stable_subtract(a, -a_minus_b),
+ _stable_subtract(b, a_minus_b),
+ )
+
===========changed ref 2===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
+ relative_matrix_epsilon=True,
+ lobpcg_topk_precondition=0,
+ lobpcg_max_iter=0,
):
"""Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the Coupled newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
matrix: the symmetric PSD matrix whose power it to be computed
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
+ relative_matrix_epsilon: Whether to use relative epsilon to the max eigen
+ value when computing inverse-pth root.
+ lobpcg_topk_precondition: If nonzero, specifies the number of top
+ eigenvectors to subtract out before performing LOBPCG. Note this makes
+ relative_matrix_epsilon essentially free.
+ lobpcg_max_iter: Maximum iteration count for LOBPCG, defaults to
+ `lobpcg_</s>
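A small sketch reproducing only the per-dimension split sizes computed in BlockPartitioner.__init__ above (the shape and block size are arbitrary):

import numpy as np

def block_split_sizes(shape, block_size):
    split_sizes = []
    for d in shape:
        if 0 < block_size < d:
            nsplit = (d - 1) // block_size                       # d-1 avoids a 0-size block
            indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size
            sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size
            sizes[-1] = d - indices[-1]
            split_sizes.append(sizes)
        else:
            split_sizes.append(np.array([d], dtype=np.int32))
    return split_sizes

print(block_split_sizes((5, 3), block_size=2))  # per-dim sizes: [2, 2, 1] and [2, 1]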
|
tools.train.scalable_shampoo.distributed_shampoo/Preconditioner.__init__
|
Modified
|
borisdayma~dalle-mini
|
9952d4c883ad07c127f58108fae778badcd3c18f
|
feat: update shampoo
|
<0>:<add> """Initializes the preconditioner.
<add>
<add> Args:
<add> param: parameter to precondition.
<add> block_size: Block size used to split param.
<add> merge_small_dims_block_size: Block size for merging dims.
<add> best_effort_shape_interpretation: Whether to collapse/merge dims together.
<add> preconditioner_type: Type of preconditioner to use.
<add> """
<3>:<add> self._transformed_shape = merge_small_dims(
<del> self._transformed_shape = merge_small_dims(self._original_shape, block_size)
<4>:<add> self._original_shape, merge_small_dims_block_size
<add> )
<6>:<add> self._preconditioner_type = preconditioner_type
|
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
+ def __init__(
+ self,
+ param,
+ block_size,
+ merge_small_dims_block_size,
+ best_effort_shape_interpretation,
+ preconditioner_type=PreconditionerType.ALL,
+ ):
- def __init__(self, param, block_size, best_effort_shape_interpretation):
<0> self._original_shape = param.shape
<1> self._transformed_shape = param.shape
<2> if best_effort_shape_interpretation:
<3> self._transformed_shape = merge_small_dims(self._original_shape, block_size)
<4> reshaped_param = jnp.reshape(param, self._transformed_shape)
<5> self._partitioner = BlockPartitioner(reshaped_param, block_size)
<6>
|
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.distributed_shampoo
merge_small_dims(shape_to_merge, max_dim)
BlockPartitioner(param, block_size)
===========changed ref 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def num_splits(self):
- return self._num_splits
-
===========changed ref 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def shapes_for_preconditioners(self):
- return self._preconditioner_shapes
-
===========changed ref 2===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
def __init__(self, param, block_size):
self._shape = param.shape
self._splits = []
split_sizes = []
# We split params into smaller blocks. Here we store the metadata to make
# that split.
for i, d in enumerate(param.shape):
if 0 < block_size < d:
# d-1, otherwise split appends a 0-size array.
nsplit = (d - 1) // block_size
indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size
sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size
sizes[-1] = d - indices[-1]
self._splits.append((i, indices))
split_sizes.append(sizes)
else:
split_sizes.append(np.array([d], dtype=np.int32))
- self._num_splits = len(split_sizes)
- self._preconditioner_shapes = []
- for t in itertools.product(*split_sizes):
- self._preconditioner_shapes.extend([[d, d] for d in t])
+ self._split_sizes = split_sizes
===========changed ref 3===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ class PreconditionerType(enum.IntEnum):
+ # Default, computes preconditioner for each dim
+ ALL = 1
+ # One sided Shampoo, in this case only on input dim.
+ # Assumes last dim is always the output dim and everything else input dim.
+ INPUT = 2
+
===========changed ref 4===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ def _pth_root_difference(w, alpha, beta, p):
+ """Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p)."""
+
+ a = w + alpha
+ b = w + beta
+ a_minus_b = alpha - beta
+ exp = -1 / p
+
+ def _stable_subtract(b, a_minus_b):
+ # Mathematically identical to the target expression, with (w+beta)^(-1/p)
+ # term factored out and w cancellation in the subtraction.
+ return (b**exp) * jnp.expm1(exp * jnp.log1p(a_minus_b / b))
+
+ return jnp.where(
+ # Choose the branch with the best log1p approximation.
+ jnp.abs(a_minus_b / b) < jnp.abs(a_minus_b / a),
+ -_stable_subtract(a, -a_minus_b),
+ _stable_subtract(b, a_minus_b),
+ )
+
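A minimal numerical sketch (not part of the commit) of why the expm1/log1p form above is used: it matches the naive difference while avoiding cancellation as alpha approaches beta. The values below are hypothetical.

import jax.numpy as jnp

w, alpha, beta, p = 10.0, 1e-3, 1.1e-3, 4
naive = (w + alpha) ** (-1 / p) - (w + beta) ** (-1 / p)
b, exp = w + beta, -1 / p
stable = (b**exp) * jnp.expm1(exp * jnp.log1p((alpha - beta) / b))
print(naive, stable)  # both ≈ 1.4e-6; the stable form keeps precision as alpha -> beta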
===========changed ref 5===========
# module: tools.train.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
+ relative_matrix_epsilon=True,
+ lobpcg_topk_precondition=0,
+ lobpcg_max_iter=0,
):
"""Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the coupled Newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
matrix: the symmetric PSD matrix whose power is to be computed
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST
(best possible precision, slowest)
+ relative_matrix_epsilon: Whether to use an epsilon relative to the max
+ eigenvalue when computing the inverse-pth root.
+ lobpcg_topk_precondition: If nonzero, specifies the number of top
+ eigenvectors to subtract out before performing LOBPCG. Note this makes
+ relative_matrix_epsilon essentially free.
+ lobpcg_max_iter: Maximum iteration count for LOBPCG, defaults to
+ `lobpcg_</s>
===========changed ref 6===========
<s>.scalable_shampoo.distributed_shampoo
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
+ relative_matrix_epsilon=True,
+ lobpcg_topk_precondition=0,
+ lobpcg_max_iter=0,
):
# offset: 1
<s>
+ lobpcg_max_iter: Maximum iteration count for LOBPCG, defaults to
+ `lobpcg_topk_precondition`.
Returns:
+ matrix^(-1/p) and the error
- matrix^(-1/p)
"""
# If the input is not square, materialize it from the concatenated form.
if matrix.shape[0] != matrix.shape[1]:
matrix = symmetric_matrices.materialize_matrix_from_concat(matrix)
assert matrix.shape[0] == matrix.shape[1]
# We use _MAT_INV_PTH_ROOT_DTYPE for the matrix inverse pth root.
# Switch to f64 if you have hardware that supports it. Enable the jax flag
# jax_enable_x64 for this to work.
matrix_size = matrix.shape[0]
orig_dtype = matrix.dtype
matrix = matrix.astype(_MAT_INV_PTH_ROOT_DTYPE)
alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE)
identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE)
+ original_matrix = matrix
+
+ if lobpcg_topk_precondition > 0:
+ # TODO(vladf): reuse previous top-k as the initial search directions
+ pad_shape =</s>
|
tools.train.scalable_shampoo.distributed_shampoo/Preconditioner.shapes_for_preconditioners
|
Modified
|
borisdayma~dalle-mini
|
9952d4c883ad07c127f58108fae778badcd3c18f
|
feat: update shampoo
|
<1>:<add> split_sizes = self._partitioner.split_sizes()
<add> rank = len(split_sizes)
<add> # We ignore preconditioner types if rank == 1
<add> preconditioner_shapes = []
<add> for t in itertools.product(*split_sizes):
<add> if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
<add> preconditioner_shapes.extend([[d, d] for d in t])
<add> else:
<add> preconditioner_shapes.extend([[d, d] for d in t[:-1]])
<add> return preconditioner_shapes
<del> return self._partitioner.shapes_for_preconditioners()
|
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
def shapes_for_preconditioners(self):
<0> """Returns shape from statistics."""
<1> return self._partitioner.shapes_for_preconditioners()
<2>
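As a sketch of the updated behavior (hypothetical block sizes, not from the commit): PreconditionerType.ALL keeps one statistic per dim, while INPUT drops the last (output) dim.

import itertools

split_sizes = [[256], [128]]   # as returned by BlockPartitioner.split_sizes()
all_shapes = [[d, d] for t in itertools.product(*split_sizes) for d in t]
input_shapes = [[d, d] for t in itertools.product(*split_sizes) for d in t[:-1]]
print(all_shapes)    # [[256, 256], [128, 128]]
print(input_shapes)  # [[256, 256]]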
|
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.distributed_shampoo.BlockPartitioner
shapes_for_preconditioners()
at: tools.train.scalable_shampoo.distributed_shampoo.Preconditioner.__init__
self._partitioner = BlockPartitioner(reshaped_param, block_size)
===========changed ref 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def shapes_for_preconditioners(self):
- return self._preconditioner_shapes
-
===========changed ref 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def num_splits(self):
- return self._num_splits
-
===========changed ref 2===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
- def statistics_from_grad(self, grad):
- """Compute statistics from gradients.
-
- Args:
- grad: Gradient to compute statistics from.
-
- Returns:
- A list of gradient statistics for each partition.
- """
- reshaped_grad = jnp.reshape(grad, self._transformed_shape)
- partitioned_grads = self._partitioner.partition(reshaped_grad)
- stats = []
- for g in partitioned_grads:
- g_stats = []
- rank = len(g.shape)
- for i in range(rank):
- axes = list(range(i)) + list(range(i + 1, rank))
- stat = jnp.tensordot(g, g, axes=(axes, axes))
- g_stats.append(stat)
- stats.extend(g_stats)
- return stats
-
===========changed ref 3===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
+ def __init__(
+ self,
+ param,
+ block_size,
+ merge_small_dims_block_size,
+ best_effort_shape_interpretation,
+ preconditioner_type=PreconditionerType.ALL,
+ ):
- def __init__(self, param, block_size, best_effort_shape_interpretation):
+ """Initializes the preconditioner.
+
+ Args:
+ param: parameter to precondition.
+ block_size: Block size used to split param.
+ merge_small_dims_block_size: Block size for merging dims.
+ best_effort_shape_interpretation: Whether to collapse/merge dims together.
+ preconditioner_type: Type of preconditioner to use.
+ """
self._original_shape = param.shape
self._transformed_shape = param.shape
if best_effort_shape_interpretation:
+ self._transformed_shape = merge_small_dims(
- self._transformed_shape = merge_small_dims(self._original_shape, block_size)
+ self._original_shape, merge_small_dims_block_size
+ )
reshaped_param = jnp.reshape(param, self._transformed_shape)
self._partitioner = BlockPartitioner(reshaped_param, block_size)
+ self._preconditioner_type = preconditioner_type
===========changed ref 4===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
def __init__(self, param, block_size):
self._shape = param.shape
self._splits = []
split_sizes = []
# We split params into smaller blocks. Here we store the metadata to make
# that split.
for i, d in enumerate(param.shape):
if 0 < block_size < d:
# d-1, otherwise split appends a 0-size array.
nsplit = (d - 1) // block_size
indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size
sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size
sizes[-1] = d - indices[-1]
self._splits.append((i, indices))
split_sizes.append(sizes)
else:
split_sizes.append(np.array([d], dtype=np.int32))
- self._num_splits = len(split_sizes)
- self._preconditioner_shapes = []
- for t in itertools.product(*split_sizes):
- self._preconditioner_shapes.extend([[d, d] for d in t])
+ self._split_sizes = split_sizes
===========changed ref 5===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ class PreconditionerType(enum.IntEnum):
+ # Default, computes preconditioner for each dim
+ ALL = 1
+ # One sided Shampoo, in this case only on input dim.
+ # Assumes last dim is always the output dim and everything else input dim.
+ INPUT = 2
+
===========changed ref 6===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ def _pth_root_difference(w, alpha, beta, p):
+ """Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p)."""
+
+ a = w + alpha
+ b = w + beta
+ a_minus_b = alpha - beta
+ exp = -1 / p
+
+ def _stable_subtract(b, a_minus_b):
+ # Mathematically identical to the target expression, with (w+beta)^(-1/p)
+ # term factored out and w cancellation in the subtraction.
+ return (b**exp) * jnp.expm1(exp * jnp.log1p(a_minus_b / b))
+
+ return jnp.where(
+ # Choose the branch with the best log1p approximation.
+ jnp.abs(a_minus_b / b) < jnp.abs(a_minus_b / a),
+ -_stable_subtract(a, -a_minus_b),
+ _stable_subtract(b, a_minus_b),
+ )
+
|
tools.train.scalable_shampoo.distributed_shampoo/Preconditioner.exponent_for_preconditioner
|
Modified
|
borisdayma~dalle-mini
|
9952d4c883ad07c127f58108fae778badcd3c18f
|
feat: update shampoo
|
<1>:<add> should_preconditioned_dims = self.should_precondition_dims()
<add> num_preconditioners = sum(should_preconditioned_dims)
<add> return 2 * num_preconditioners
<del> return 2 * len(self._transformed_shape)
|
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
def exponent_for_preconditioner(self):
<0> """Returns exponent to use for inverse-pth root M^{-1/p}."""
<1> return 2 * len(self._transformed_shape)
<2>
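Worked example (assumed rank-2 block, not from the commit): with both dims preconditioned the exponent is 2 * 2 = 4, matching Shampoo's L^(-1/4) G R^(-1/4) update; with the one-sided INPUT type only the input dim counts, giving 2.

should_precondition_dims = [True, False]   # hypothetical INPUT-type rank-2 block
print(2 * sum(should_precondition_dims))   # 2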
|
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.distributed_shampoo.Preconditioner.__init__
self._transformed_shape = param.shape
self._transformed_shape = merge_small_dims(self._original_shape, block_size)
===========changed ref 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def num_splits(self):
- return self._num_splits
-
===========changed ref 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def shapes_for_preconditioners(self):
- return self._preconditioner_shapes
-
===========changed ref 2===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
def shapes_for_preconditioners(self):
"""Returns shape from statistics."""
+ split_sizes = self._partitioner.split_sizes()
+ rank = len(split_sizes)
+ # We ignore preconditioner types if rank == 1
+ preconditioner_shapes = []
+ for t in itertools.product(*split_sizes):
+ if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
+ preconditioner_shapes.extend([[d, d] for d in t])
+ else:
+ preconditioner_shapes.extend([[d, d] for d in t[:-1]])
+ return preconditioner_shapes
- return self._partitioner.shapes_for_preconditioners()
===========changed ref 3===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
- def statistics_from_grad(self, grad):
- """Compute statistics from gradients.
-
- Args:
- grad: Gradient to compute statistics from.
-
- Returns:
- A list of gradient statistics for each partition.
- """
- reshaped_grad = jnp.reshape(grad, self._transformed_shape)
- partitioned_grads = self._partitioner.partition(reshaped_grad)
- stats = []
- for g in partitioned_grads:
- g_stats = []
- rank = len(g.shape)
- for i in range(rank):
- axes = list(range(i)) + list(range(i + 1, rank))
- stat = jnp.tensordot(g, g, axes=(axes, axes))
- g_stats.append(stat)
- stats.extend(g_stats)
- return stats
-
===========changed ref 4===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
+ def __init__(
+ self,
+ param,
+ block_size,
+ merge_small_dims_block_size,
+ best_effort_shape_interpretation,
+ preconditioner_type=PreconditionerType.ALL,
+ ):
- def __init__(self, param, block_size, best_effort_shape_interpretation):
+ """Initializes the preconditioner.
+
+ Args:
+ param: parameter to precondition.
+ block_size: Block size used to split param.
+ merge_small_dims_block_size: Block size for merging dims.
+ best_effort_shape_interpretation: Whether to collapse/merge dims together.
+ preconditioner_type: Type of preconditioner to use.
+ """
self._original_shape = param.shape
self._transformed_shape = param.shape
if best_effort_shape_interpretation:
+ self._transformed_shape = merge_small_dims(
- self._transformed_shape = merge_small_dims(self._original_shape, block_size)
+ self._original_shape, merge_small_dims_block_size
+ )
reshaped_param = jnp.reshape(param, self._transformed_shape)
self._partitioner = BlockPartitioner(reshaped_param, block_size)
+ self._preconditioner_type = preconditioner_type
===========changed ref 5===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
def __init__(self, param, block_size):
self._shape = param.shape
self._splits = []
split_sizes = []
# We split params into smaller blocks. Here we store the metadata to make
# that split.
for i, d in enumerate(param.shape):
if 0 < block_size < d:
# d-1, otherwise split appends a 0-size array.
nsplit = (d - 1) // block_size
indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size
sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size
sizes[-1] = d - indices[-1]
self._splits.append((i, indices))
split_sizes.append(sizes)
else:
split_sizes.append(np.array([d], dtype=np.int32))
- self._num_splits = len(split_sizes)
- self._preconditioner_shapes = []
- for t in itertools.product(*split_sizes):
- self._preconditioner_shapes.extend([[d, d] for d in t])
+ self._split_sizes = split_sizes
===========changed ref 6===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ class PreconditionerType(enum.IntEnum):
+ # Default, computes preconditioner for each dim
+ ALL = 1
+ # One sided Shampoo, in this case only on input dim.
+ # Assumes last dim is always the output dim and everything else input dim.
+ INPUT = 2
+
===========changed ref 7===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ def _pth_root_difference(w, alpha, beta, p):
+ """Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p)."""
+
+ a = w + alpha
+ b = w + beta
+ a_minus_b = alpha - beta
+ exp = -1 / p
+
+ def _stable_subtract(b, a_minus_b):
+ # Mathematically identical to the target expression, with (w+beta)^(-1/p)
+ # term factored out and w cancellation in the subtraction.
+ return (b**exp) * jnp.expm1(exp * jnp.log1p(a_minus_b / b))
+
+ return jnp.where(
+ # Choose the branch with the best log1p approximation.
+ jnp.abs(a_minus_b / b) < jnp.abs(a_minus_b / a),
+ -_stable_subtract(a, -a_minus_b),
+ _stable_subtract(b, a_minus_b),
+ )
+
|
tools.train.scalable_shampoo.distributed_shampoo/Preconditioner.preconditioned_grad
|
Modified
|
borisdayma~dalle-mini
|
9952d4c883ad07c127f58108fae778badcd3c18f
|
feat: update shampoo
|
<13>:<del> num_splits = self._partitioner.num_splits()
<15>:<add> should_preconditioned_dims = self.should_precondition_dims()
<add> num_preconditioners = sum(should_preconditioned_dims)
<16>:<add> i * num_preconditioners : (i + 1) * num_preconditioners
<del> i * num_splits : (i + 1) * num_splits
<18>:<add> precond_g = g
<19>:<add> for j, precondition in enumerate(should_preconditioned_dims):
<add> if precondition:
<del> precond_g = g
<20>:<del> for j in range(rank):
<21>:<add> precond_g = jnp.tensordot(
<del> precond_g = jnp.tensordot(
<22>:<add> precond_g, preconditioners_for_grad[j], axes=[[0], [0]]
<del> precond_g, preconditioners_for_grad[j], axes=[[0], [0]]
<23>:<add> )
<del> )
<24>:<add> else:
<add> precond_g = jnp.transpose(precond_g, axes=(*range(1, rank), 0))
|
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
def preconditioned_grad(self, grad, preconditioners):
<0> """Precondition the gradient.
<1>
<2> Args:
<3> grad: A gradient tensor to precondition.
<4> preconditioners: A list of preconditioners to apply.
<5>
<6> Returns:
<7> A preconditioned gradient.
<8> """
<9>
<10> reshaped_grad = jnp.reshape(grad, self._transformed_shape)
<11> partitioned_grads = self._partitioner.partition(reshaped_grad)
<12> preconditioned_partitioned_grads = []
<13> num_splits = self._partitioner.num_splits()
<14> for i, g in enumerate(partitioned_grads):
<15> preconditioners_for_grad = preconditioners[
<16> i * num_splits : (i + 1) * num_splits
<17> ]
<18> rank = len(g.shape)
<19> precond_g = g
<20> for j in range(rank):
<21> precond_g = jnp.tensordot(
<22> precond_g, preconditioners_for_grad[j], axes=[[0], [0]]
<23> )
<24> preconditioned_partitioned_grads.append(precond_g)
<25> merged_grad = self._partitioner.merge_partitions(
<26> preconditioned_partitioned_grads
<27> )
<28> return jnp.reshape(merged_grad, self._original_shape)
<29>
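A minimal sketch (toy shapes, identity stand-ins, not from the commit) of the tensordot loop above: contracting axis 0 at each step cycles the axes, so after `rank` steps the block returns in its original order, left- and right-multiplied by the (symmetric) preconditioners.

import jax.numpy as jnp

G = jnp.arange(12.0).reshape(4, 3)
L, R = jnp.eye(4), jnp.eye(3)          # stand-ins for statistic^(-1/4) factors
out = G
for P in (L, R):
    out = jnp.tensordot(out, P, axes=[[0], [0]])
print(jnp.allclose(out, L @ G @ R))    # True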
|
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.distributed_shampoo.BlockPartitioner
num_splits()
partition(tensor)
merge_partitions(partitions)
at: tools.train.scalable_shampoo.distributed_shampoo.Preconditioner.__init__
self._original_shape = param.shape
self._transformed_shape = param.shape
self._transformed_shape = merge_small_dims(self._original_shape, block_size)
self._partitioner = BlockPartitioner(reshaped_param, block_size)
===========changed ref 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def num_splits(self):
- return self._num_splits
-
===========changed ref 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
def exponent_for_preconditioner(self):
"""Returns exponent to use for inverse-pth root M^{-1/p}."""
+ should_preconditioned_dims = self.should_precondition_dims()
+ num_preconditioners = sum(should_preconditioned_dims)
+ return 2 * num_preconditioners
- return 2 * len(self._transformed_shape)
===========changed ref 2===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def shapes_for_preconditioners(self):
- return self._preconditioner_shapes
-
===========changed ref 3===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
def shapes_for_preconditioners(self):
"""Returns shape from statistics."""
+ split_sizes = self._partitioner.split_sizes()
+ rank = len(split_sizes)
+ # We ignore preconditioner types if rank == 1
+ preconditioner_shapes = []
+ for t in itertools.product(*split_sizes):
+ if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
+ preconditioner_shapes.extend([[d, d] for d in t])
+ else:
+ preconditioner_shapes.extend([[d, d] for d in t[:-1]])
+ return preconditioner_shapes
- return self._partitioner.shapes_for_preconditioners()
===========changed ref 4===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
- def statistics_from_grad(self, grad):
- """Compute statistics from gradients.
-
- Args:
- grad: Gradient to compute statistics from.
-
- Returns:
- A list of gradient statistics for each partition.
- """
- reshaped_grad = jnp.reshape(grad, self._transformed_shape)
- partitioned_grads = self._partitioner.partition(reshaped_grad)
- stats = []
- for g in partitioned_grads:
- g_stats = []
- rank = len(g.shape)
- for i in range(rank):
- axes = list(range(i)) + list(range(i + 1, rank))
- stat = jnp.tensordot(g, g, axes=(axes, axes))
- g_stats.append(stat)
- stats.extend(g_stats)
- return stats
-
===========changed ref 5===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
+ def __init__(
+ self,
+ param,
+ block_size,
+ merge_small_dims_block_size,
+ best_effort_shape_interpretation,
+ preconditioner_type=PreconditionerType.ALL,
+ ):
- def __init__(self, param, block_size, best_effort_shape_interpretation):
+ """Initializes the preconditioner.
+
+ Args:
+ param: parameter to precondition.
+ block_size: Block size used to split param.
+ merge_small_dims_block_size: Block size for merging dims.
+ best_effort_shape_interpretation: Whether to collapse/merge dims together.
+ preconditioner_type: Type of preconditioner to use.
+ """
self._original_shape = param.shape
self._transformed_shape = param.shape
if best_effort_shape_interpretation:
+ self._transformed_shape = merge_small_dims(
- self._transformed_shape = merge_small_dims(self._original_shape, block_size)
+ self._original_shape, merge_small_dims_block_size
+ )
reshaped_param = jnp.reshape(param, self._transformed_shape)
self._partitioner = BlockPartitioner(reshaped_param, block_size)
+ self._preconditioner_type = preconditioner_type
===========changed ref 6===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
def __init__(self, param, block_size):
self._shape = param.shape
self._splits = []
split_sizes = []
# We split params into smaller blocks. Here we store the metadata to make
# that split.
for i, d in enumerate(param.shape):
if 0 < block_size < d:
# d-1, otherwise split appends a 0-size array.
nsplit = (d - 1) // block_size
indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size
sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size
sizes[-1] = d - indices[-1]
self._splits.append((i, indices))
split_sizes.append(sizes)
else:
split_sizes.append(np.array([d], dtype=np.int32))
- self._num_splits = len(split_sizes)
- self._preconditioner_shapes = []
- for t in itertools.product(*split_sizes):
- self._preconditioner_shapes.extend([[d, d] for d in t])
+ self._split_sizes = split_sizes
===========changed ref 7===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ class PreconditionerType(enum.IntEnum):
+ # Default, computes preconditioner for each dim
+ ALL = 1
+ # One sided Shampoo, in this case only on input dim.
+ # Assumes last dim is always the output dim and everything else input dim.
+ INPUT = 2
+
===========changed ref 8===========
# module: tools.train.scalable_shampoo.distributed_shampoo
+ def _pth_root_difference(w, alpha, beta, p):
+ """Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p)."""
+
+ a = w + alpha
+ b = w + beta
+ a_minus_b = alpha - beta
+ exp = -1 / p
+
+ def _stable_subtract(b, a_minus_b):
+ # Mathematically identical to the target expression, with (w+beta)^(-1/p)
+ # term factored out and w cancellation in the subtraction.
+ return (b**exp) * jnp.expm1(exp * jnp.log1p(a_minus_b / b))
+
+ return jnp.where(
+ # Choose the branch with the best log1p approximation.
+ jnp.abs(a_minus_b / b) < jnp.abs(a_minus_b / a),
+ -_stable_subtract(a, -a_minus_b),
+ _stable_subtract(b, a_minus_b),
+ )
+
|
tools.train.scalable_shampoo.distributed_shampoo/_convert_to_parameter_stats
|
Modified
|
borisdayma~dalle-mini
|
9952d4c883ad07c127f58108fae778badcd3c18f
|
feat: update shampoo
|
<10>:<add> if not convert_statistics:
<add> new_statistics = None
|
# module: tools.train.scalable_shampoo.distributed_shampoo
+ def _convert_to_parameter_stats(global_stats, local_stat, convert_statistics=True):
- def _convert_to_parameter_stats(global_stats, local_stat):
<0> """Creates parameter stats from sharded stats."""
<1> index_start = int(local_stat.index_start)
<2> index_end = int(len(local_stat.sizes)) + index_start
<3> statistics = global_stats.statistics[index_start:index_end, :, :]
<4> preconditioners = global_stats.preconditioners[index_start:index_end, :, :]
<5> new_statistics = []
<6> new_preconditioners = []
<7> for i, size in enumerate(local_stat.sizes):
<8> new_statistics.append(statistics[i][:size, :size])
<9> new_preconditioners.append(preconditioners[i][:size, :size])
<10> return ParameterStats(
<11> local_stat.diagonal_statistics,
<12> new_statistics,
<13> new_preconditioners,
<14> local_stat.diagonal_momentum,
<15> local_stat.momentum,
<16> local_stat.training_metrics,
<17> )
<18>
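A small sketch (hypothetical sizes) of the slicing above: global statistics live in a buffer padded to a common block size, and per-parameter stats are recovered with [:size, :size].

import numpy as np

padded = np.zeros((2, 128, 128))       # two padded statistics in the global buffer
sizes = [96, 64]
stats = [padded[i][:s, :s] for i, s in enumerate(sizes)]
print([s.shape for s in stats])        # [(96, 96), (64, 64)]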
|
===========unchanged ref 0===========
at: tools.train.scalable_shampoo.distributed_shampoo
ParameterStats(typename: str, fields: Iterable[Tuple[str, Any]]=..., **kwargs: Any)
===========changed ref 0===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
+ def split_sizes(self):
+ return self._split_sizes
+
===========changed ref 1===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def num_splits(self):
- return self._num_splits
-
===========changed ref 2===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
def exponent_for_preconditioner(self):
"""Returns exponent to use for inverse-pth root M^{-1/p}."""
+ should_preconditioned_dims = self.should_precondition_dims()
+ num_preconditioners = sum(should_preconditioned_dims)
+ return 2 * num_preconditioners
- return 2 * len(self._transformed_shape)
===========changed ref 3===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
- def shapes_for_preconditioners(self):
- return self._preconditioner_shapes
-
===========changed ref 4===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
def shapes_for_preconditioners(self):
"""Returns shape from statistics."""
+ split_sizes = self._partitioner.split_sizes()
+ rank = len(split_sizes)
+ # We ignore preconditioner types if rank == 1
+ preconditioner_shapes = []
+ for t in itertools.product(*split_sizes):
+ if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
+ preconditioner_shapes.extend([[d, d] for d in t])
+ else:
+ preconditioner_shapes.extend([[d, d] for d in t[:-1]])
+ return preconditioner_shapes
- return self._partitioner.shapes_for_preconditioners()
===========changed ref 5===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
- def statistics_from_grad(self, grad):
- """Compute statistics from gradients.
-
- Args:
- grad: Gradient to compute statistics from.
-
- Returns:
- A list of gradient statistics for each partition.
- """
- reshaped_grad = jnp.reshape(grad, self._transformed_shape)
- partitioned_grads = self._partitioner.partition(reshaped_grad)
- stats = []
- for g in partitioned_grads:
- g_stats = []
- rank = len(g.shape)
- for i in range(rank):
- axes = list(range(i)) + list(range(i + 1, rank))
- stat = jnp.tensordot(g, g, axes=(axes, axes))
- g_stats.append(stat)
- stats.extend(g_stats)
- return stats
-
===========changed ref 6===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class Preconditioner:
+ def __init__(
+ self,
+ param,
+ block_size,
+ merge_small_dims_block_size,
+ best_effort_shape_interpretation,
+ preconditioner_type=PreconditionerType.ALL,
+ ):
- def __init__(self, param, block_size, best_effort_shape_interpretation):
+ """Initializes the preconditioner.
+
+ Args:
+ param: parameter to precondition.
+ block_size: Block size used to split param.
+ merge_small_dims_block_size: Block size for merging dims.
+ best_effort_shape_interpretation: Whether to collapse/merge dims together.
+ preconditioner_type: Type of preconditioner to use.
+ """
self._original_shape = param.shape
self._transformed_shape = param.shape
if best_effort_shape_interpretation:
+ self._transformed_shape = merge_small_dims(
- self._transformed_shape = merge_small_dims(self._original_shape, block_size)
+ self._original_shape, merge_small_dims_block_size
+ )
reshaped_param = jnp.reshape(param, self._transformed_shape)
self._partitioner = BlockPartitioner(reshaped_param, block_size)
+ self._preconditioner_type = preconditioner_type
===========changed ref 7===========
# module: tools.train.scalable_shampoo.distributed_shampoo
class BlockPartitioner:
def __init__(self, param, block_size):
self._shape = param.shape
self._splits = []
split_sizes = []
# We split params into smaller blocks. Here we store the metadata to make
# that split.
for i, d in enumerate(param.shape):
if 0 < block_size < d:
# d-1, otherwise split appends a 0-size array.
nsplit = (d - 1) // block_size
indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size
sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size
sizes[-1] = d - indices[-1]
self._splits.append((i, indices))
split_sizes.append(sizes)
else:
split_sizes.append(np.array([d], dtype=np.int32))
- self._num_splits = len(split_sizes)
- self._preconditioner_shapes = []
- for t in itertools.product(*split_sizes):
- self._preconditioner_shapes.extend([[d, d] for d in t])
+ self._split_sizes = split_sizes
|
dalle_mini.model.configuration/DalleBartConfig.__init__
|
Modified
|
borisdayma~dalle-mini
|
90e39c4f4fafdbbf0d9b3d37074f7f6c9199e3c9
|
feat: add magneto
|
<18>:<add> "subln",
<add> ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln', 'subln'"
<del> ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
<27>:<add> self.use_subln_init = use_subln_init
|
<s> implemented yet - from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
<0> # text normalizer
<1> self.normalize_text = normalize_text
<2>
<3> # transformer variants
<4> self.use_bias = use_bias
<5> assert ln_type in [
<6> "rmsnorm",
<7> "layernorm",
<8> ], "ln_type must be 'rmsnorm' or 'layernorm'"
<9> self.ln_type = ln_type
<10> if ln_positions == "deepnet":
<11> ln_positions = "postln"
<12> assert ln_positions in [
<13> "normformer",
<14> "swinv2",
<15> "cogview",
<16> "postln",
<17> "preln",
<18> ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
<19> self.use_head_scale = use_head_scale
<20> assert use_alibi is False, "use_alibi is not supported yet"
<21> self.ln_positions = ln_positions
<22> self.use_cosine_attention = use_cosine_attention
<23> self.tau_init = tau_init
<24> self.use_absolute_position_embeddings = use_absolute_position_embeddings
<25> self.use_swin_position_embeddings = use_swin_position_embeddings
<26> self.use_deepnet_scaling = use_deepnet_scaling
<27> self.use_glu = use_glu
<28> self.use_alibi = use_alibi
<29> self.sinkhorn_iters = sinkhorn</s>
|
===========below chunk 0===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 1
if ln_positions == "postln":
assert (
use_final_ln_encoder
), "use_final_ln_encoder must be True when ln_positions is 'postln'"
assert (
use_final_ln_decoder
), "use_final_ln_decoder must be True when ln_positions is 'postln'"
self.use_final_ln_encoder = use_final_ln_encoder
self.use_final_ln_decoder = use_final_ln_decoder
self.force_ln_scale = force_ln_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
# all layers are the same in most configurations
</s>
===========below chunk 1===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 2
<s>cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
# all layers are the same in most configurations
self.use_scan = use_scan if use_scan is not None else ln_positions != "swinv2"
assert not (
self.use_scan and ln_positions == "swinv2"
), "scan cannot be used with 'swinv2'"
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length + 1)
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
</s>
===========below chunk 2===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 3
<s>eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
===========unchanged ref 0===========
at: _warnings
warn(message: str, category: Optional[Type[Warning]]=..., stacklevel: int=..., source: Optional[Any]=...) -> None
warn(message: Warning, category: Any=..., stacklevel: int=..., source: Optional[Any]=...) -> None
at: transformers.configuration_utils.PretrainedConfig
model_type: str = ""
is_composition: bool = False
attribute_map: Dict[str, str] = {}
_auto_class: Optional[str] = None
__init__(**kwargs)
__init__(self, **kwargs)
at: transformers.configuration_utils.PretrainedConfig.__init__
self.bos_token_id = kwargs.pop("bos_token_id", None)
at: typing.Mapping
get(key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]
get(key: _KT) -> Optional[_VT_co]
at: typing.MutableMapping
pop(key: _KT, default: Union[_VT, _T]=...) -> Union[_VT, _T]
pop(key: _KT) -> _VT
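A hedged sketch of the widened check in this record: with this commit, "subln" (Magneto-style sub-LayerNorm) is accepted by the ln_positions assertion alongside the existing options.

valid = ["normformer", "swinv2", "cogview", "postln", "preln", "subln"]
ln_positions = "subln"
assert ln_positions in valid, "ln_positions must be one of: " + ", ".join(valid)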
|
dalle_mini.model.modeling/deepnet_init
|
Modified
|
borisdayma~dalle-mini
|
90e39c4f4fafdbbf0d9b3d37074f7f6c9199e3c9
|
feat: add magneto
|
<0>:<add> init = jax.nn.initializers.normal(init_std)
<del> init = jax.nn.initializers.glorot_normal()
|
# module: dalle_mini.model.modeling
# deepnet initialization
+ def deepnet_init(init_std, gain=1):
- def deepnet_init(gain=1):
<0> init = jax.nn.initializers.glorot_normal()
<1>
<2> def _init(*args, **kwargs):
<3> return gain * init(*args, **kwargs)
<4>
<5> return _init
<6>
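Usage sketch of the updated initializer (hypothetical std and gain): it now draws from normal(init_std) and scales by gain, so the sample std is roughly gain * init_std rather than a Glorot-style fan-based value.

import jax
import jax.numpy as jnp

def deepnet_init(init_std, gain=1):
    init = jax.nn.initializers.normal(init_std)
    def _init(*args, **kwargs):
        return gain * init(*args, **kwargs)
    return _init

w = deepnet_init(0.02, gain=0.5)(jax.random.PRNGKey(0), (1024, 1024))
print(jnp.std(w))  # ≈ 0.01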
|
===========changed ref 0===========
<s> implemented yet - from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
self.use_bias = use_bias
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
if ln_positions == "deepnet":
ln_positions = "postln"
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
"postln",
"preln",
+ "subln",
+ ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln', 'subln'"
- ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
self.use_head_scale = use_head_scale
assert use_alibi is False, "use_alibi is not supported yet"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_swin_position_embeddings = use_swin_position_embeddings
self.use_deepnet_scaling = use_deepnet_scaling
+ self.use_subln_init = use_subln_init
self.</s>
===========changed ref 1===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 1
<s> use_deepnet_scaling
+ self.use_subln_init = use_subln_init
self.use_glu = use_glu
self.use_alibi = use_alibi
self.sinkhorn_iters = sinkhorn_iters
if ln_positions == "postln":
assert (
use_final_ln_encoder
), "use_final_ln_encoder must be True when ln_positions is 'postln'"
assert (
use_final_ln_decoder
), "use_final_ln_decoder must be True when ln_positions is 'postln'"
self.use_final_ln_encoder = use_final_ln_encoder
self.use_final_ln_decoder = use_final_ln_decoder
self.force_ln_scale = force_ln_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_</s>
===========changed ref 2===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 2
<s> self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
# all layers are the same in most configurations
self.use_scan = use_scan if use_scan is not None else ln_positions != "swinv2"
assert not (
self.use_scan and ln_positions == "swinv2"
), "scan cannot be used with 'swinv2'"
self.scale_embedding = (
scale_embedding # scale factor will be sqrt(d_model) if True
)
# special token id's are appended to vocab if not provided
decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size)
bos_token_id = kwargs.pop("bos_token_id", image_vocab_size)
pad_token_id = kwargs.pop("pad_token_id", image_vocab_size)
eos_token_id = kwargs.pop("eos_token_id", image_vocab_size)
# we generate to image_length + 1 (for bos) by default
min_length = kwargs.pop("min_length", image_length +</s>
===========changed ref 3===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 3
<s>
max_length = kwargs.pop("max_length", image_length + 1)
super().__init__(
# args required in parent class
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
decoder_start_token_id=decoder_start_token_id,
bos_token_id=bos_token_id,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
min_length=min_length,
max_length=max_length,
do_sample=do_sample,
**kwargs,
)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get(
"force_bos_token_to_be_generated", False
):
self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions."
"The config can simply be saved and uploaded again to be fixed."
)
|
dalle_mini.model.modeling/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
90e39c4f4fafdbbf0d9b3d37074f7f6c9199e3c9
|
feat: add magneto
|
<14>:<add> if self.config.use_deepnet_scaling:
<add> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<del> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<15>:<add> self.config
<del> self.config
<16>:<add> )
<del> )
<17>:<add> elif self.config.use_subln_init:
<add> gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config)
<19>:<del> kernel_init=deepnet_init()
<20>:<del> if self.config.use_deepnet_scaling
<21>:<add> kernel_init=jax.nn.initializers.normal(self.config.init_std)
<del> else jax.nn.initializers.normal(self.config.init_std)
<24>:<del> kernel_init=deepnet_init()
<25>:<del> if self.config.use_deepnet_scaling
<26>:<add> kernel_init=jax.nn.initializers.normal(self.config.init_std)
<del> else jax.nn.initializers.normal(self.config.init_std)
<29>:<add> kernel_init=deepnet_init(self.config.init_std, gain)
<del> kernel_init=deepnet_init(gain)
<30>:<add> if (self.config.use_deepnet_scaling or self.config.use_subln_init)
<del> if self.config.use_deepnet_scaling
<34>:<add> kernel_init=deepnet_init(self.config.init_
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> if self.head_dim * self.num_heads != self.embed_dim:
<2> raise ValueError(
<3> f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
<4> f" and `num_heads`: {self.num_heads})."
<5> )
<6>
<7> dense = partial(
<8> nn.Dense,
<9> self.embed_dim,
<10> use_bias=self.bias,
<11> dtype=self.dtype,
<12> )
<13>
<14> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<15> self.config
<16> )
<17>
<18> self.q_proj = dense(
<19> kernel_init=deepnet_init()
<20> if self.config.use_deepnet_scaling
<21> else jax.nn.initializers.normal(self.config.init_std)
<22> )
<23> self.k_proj = dense(
<24> kernel_init=deepnet_init()
<25> if self.config.use_deepnet_scaling
<26> else jax.nn.initializers.normal(self.config.init_std)
<27> )
<28> self.v_proj = dense(
<29> kernel_init=deepnet_init(gain)
<30> if self.config.use_deepnet_scaling
<31> else jax.nn.initializers.normal(self.config.init_std)
<32> )
<33> self.out_proj = dense(
<34> kernel_init=deepnet_init(gain)
<35> if self.config.use_deepnet_scaling
<36> else jax.nn.initializers.normal(self.config.init_std)
<37> )
<38> self.dropout_layer = nn.Dropout(rate=self.dropout)
<39>
<40> if</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
# offset: 1
self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
)
if self.config.use_cosine_attention:
# TODO: try using a learnt scale, somehow it immediately diverges in my experiments
self.tau = self.config.tau_init
if self.config.use_swin_position_embeddings:
self.rel_bias = nn.Embed(
self.q_length,
self.k_length * self.num_heads,
embedding_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
at: dalle_mini.model.modeling.FlaxBartAttention
is_encoder: bool = False
q_length: int = None
k_length: int = None
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
setup(self) -> None
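Worked example of the DeepNet "beta" gains listed above, for a hypothetical config with 12 encoder and 12 decoder layers:

enc_layers, dec_layers = 12, 12
enc_beta = 0.87 * (enc_layers**4 * dec_layers) ** -0.0625
dec_beta = (12 * dec_layers) ** -0.25
print(enc_beta, dec_beta)  # ≈ 0.40 and ≈ 0.29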
===========changed ref 0===========
# module: dalle_mini.model.modeling
# deepnet initialization
+ def deepnet_init(init_std, gain=1):
- def deepnet_init(gain=1):
+ init = jax.nn.initializers.normal(init_std)
- init = jax.nn.initializers.glorot_normal()
def _init(*args, **kwargs):
return gain * init(*args, **kwargs)
return _init
===========changed ref 1===========
<s> implemented yet - from "Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# text normalizer
self.normalize_text = normalize_text
# transformer variants
self.use_bias = use_bias
assert ln_type in [
"rmsnorm",
"layernorm",
], "ln_type must be 'rmsnorm' or 'layernorm'"
self.ln_type = ln_type
if ln_positions == "deepnet":
ln_positions = "postln"
assert ln_positions in [
"normformer",
"swinv2",
"cogview",
"postln",
"preln",
+ "subln",
+ ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln', 'subln'"
- ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'"
self.use_head_scale = use_head_scale
assert use_alibi is False, "use_alibi is not supported yet"
self.ln_positions = ln_positions
self.use_cosine_attention = use_cosine_attention
self.tau_init = tau_init
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_swin_position_embeddings = use_swin_position_embeddings
self.use_deepnet_scaling = use_deepnet_scaling
+ self.use_subln_init = use_subln_init
self.</s>
===========changed ref 2===========
<s>Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation"
sinkhorn_iters=1, # used in SinkFormers
use_final_ln_encoder=True, # final layer normalization in encoder
use_final_ln_decoder=True, # final layer normalization in decoder
# parameters that should not be necessary but could affect results
force_ln_scale=False, # force scale in layernorm even when followed by dense layers
**kwargs,
):
# offset: 1
<s> use_deepnet_scaling
+ self.use_subln_init = use_subln_init
self.use_glu = use_glu
self.use_alibi = use_alibi
self.sinkhorn_iters = sinkhorn_iters
if ln_positions == "postln":
assert (
use_final_ln_encoder
), "use_final_ln_encoder must be True when ln_positions is 'postln'"
assert (
use_final_ln_decoder
), "use_final_ln_decoder must be True when ln_positions is 'postln'"
self.use_final_ln_encoder = use_final_ln_encoder
self.use_final_ln_decoder = use_final_ln_decoder
self.force_ln_scale = force_ln_scale
# common parameters
self.encoder_vocab_size = encoder_vocab_size
self.image_vocab_size = image_vocab_size
self.image_length = image_length
self.max_text_length = max_text_length
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_</s>
|
dalle_mini.model.modeling/FlaxBartAttention.__call__
|
Modified
|
borisdayma~dalle-mini
|
90e39c4f4fafdbbf0d9b3d37074f7f6c9199e3c9
|
feat: add magneto
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> """Input shape: Batch x Time x Channel"""
<1>
<2> # if key_value_states are provided this layer is used as a cross-attention layer
<3> # for the decoder
<4> is_cross_attention = key_value_states is not None
<5> batch_size = hidden_states.shape[0]
<6>
<7> # get query proj
<8> query_states = self.q_proj(hidden_states)
<9> # get key, value proj
<10> if is_cross_attention:
<11> # cross_attentions
<12> key_states = self.k_proj(key_value_states)
<13> value_states = self.v_proj(key_value_states)
<14> else:
<15> # self_attention
<16> key_states = self.k_proj(hidden_states)
<17> value_states = self.v_proj(hidden_states)
<18>
<19> query_states = self._split_heads(query_states)
<20> key_states = self._split_heads(key_states)
<21> value_states = self._split_heads(value_states)
<22>
<23> # handle cache prepare causal attention mask
<24> if self.causal:
<25> query_length, key_length = query_states.shape[1], key_states.shape[1]
<26> if self.has_variable("cache", "cached_key"):
<27> mask_shift = self.variables["cache"]["cache_index"]
<28> max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
<29> causal_mask = lax.dynamic_slice(
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> # Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -jnp.inf).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
# relative position embeddings
if self.config.use_swin_position_embeddings:
position_ids = jnp.arange(self.q_length)
embed_pos = self.rel_bias(position_ids)
embed_pos = rearrange(embed_pos, "q (k h) -> 1 h q k", h=self.num_heads)
else:
</s>
===========below chunk 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>_pos = None
tau = self.tau if self.config.use_cosine_attention else None
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
mask=attention_mask,
embed_pos=embed_pos,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
sinkhorn_iters=self.config.sinkhorn_iters,
is_encoder=self.is_encoder,
tau=tau,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
if self.config.use_head_scale:
# per Normformer
attn_output = attn_output * self.head_scale
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
===========unchanged ref 0===========
at: dalle_mini.model.modeling
dot_product_attention_weights(query: Any, key: Any, bias: Optional[Any]=None, mask: Optional[Any]=None, embed_pos: Optional[Any]=None, broadcast_dropout: bool=True, dropout_rng: Optional[PRNGKey]=None, dropout_rate: float=0.0, deterministic: bool=False, dtype: Any=jnp.float32, precision: PrecisionLike=None, sinkhorn_iters: int=1, is_encoder: bool=False, tau=None)
at: dalle_mini.model.modeling.FlaxBartAttention
q_length: int = None
at: dalle_mini.model.modeling.FlaxBartAttention.setup
self.q_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.tau = self.config.tau_init
self.rel_bias = nn.Embed(
self.q_length,
self.k_length * self.num_heads,
embedding_init=deepnet_init()
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
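A minimal sketch (toy shapes, fixed tau, not from the commit) of the cosine-attention path in __call__ above: queries and keys are L2-normalized before the dot product and the logits are divided by tau; in the actual code the tau scaling happens inside dot_product_attention_weights.

import jax.numpy as jnp

q = jnp.ones((1, 2, 4, 8))   # (batch, length, heads, head_dim), hypothetical
k = jnp.ones((1, 2, 4, 8))
tau = 0.05
q = q / (jnp.linalg.norm(q, axis=-1, keepdims=True) + 1e-8)
k = k / (jnp.linalg.norm(k, axis=-1, keepdims=True) + 1e-8)
logits = jnp.einsum("...qhd,...khd->...hqk", q, k) / tau
print(logits.shape)  # (1, 4, 2, 2)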
|
|
dalle_mini.model.modeling/GLU.__call__
|
Modified
|
borisdayma~dalle-mini
|
90e39c4f4fafdbbf0d9b3d37074f7f6c9199e3c9
|
feat: add magneto
|
<0>:<add> if self.config.use_deepnet_scaling:
<add> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<del> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1>:<add> self.config
<del> self.config
<2>:<add> )
<del> )
<3>:<add> elif self.config.use_subln_init:
<add> gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config)
<4>:<add> if self.config.ln_positions in ["normformer", "cogview", "preln", "subln"]:
<del> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<15>:<add> kernel_init=deepnet_init(self.config.init_std, gain)
<del> kernel_init=deepnet_init(gain)
<16>:<add> if (self.config.use_deepnet_scaling or self.config.use_subln_init)
<del> if self.config.use_deepnet_scaling
<24>:<add> kernel_init=deepnet_init(self.config.init_std, gain)
<del> kernel_init=deepnet_init(gain)
<25>:<add> if (self.config.use_deepnet_scaling or self.config.use_subln_
|
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3>
<4> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<5> x = norm(
<6> self.config.ln_type,
<7> dtype=self.dtype,
<8> epsilon=1e-05,
<9> use_scale=self.config.force_ln_scale,
<10> )(x)
<11> w = nn.Dense(
<12> self.ffn_dim,
<13> dtype=self.dtype,
<14> use_bias=self.config.use_bias,
<15> kernel_init=deepnet_init(gain)
<16> if self.config.use_deepnet_scaling
<17> else jax.nn.initializers.normal(self.config.init_std),
<18> )(x)
<19> w = ACT2FN[self.config.activation_function](w)
<20> v = nn.Dense(
<21> self.ffn_dim,
<22> dtype=self.dtype,
<23> use_bias=self.config.use_bias,
<24> kernel_init=deepnet_init(gain)
<25> if self.config.use_deepnet_scaling
<26> else jax.nn.initializers.normal(self.config.init_std),
<27> )(x)
<28> x = w * v
<29> if self.config.ln_positions in ["normformer"]:
<30> x = norm(
<31> self.config.ln_type,
<32> dtype=self.dtype,
<33> epsilon=1e-05,
<34> use_scale=self.config.force_ln_scale,
<35> )(x)
<36> x = nn.Dropout(rate=self.config.activation_dropout)(
<37> x</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=self.config.use_bias,
kernel_init=deepnet_init(gain)
if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.GLU
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
# module: dalle_mini.model.modeling
# deepnet initialization
+ def deepnet_init(init_std, gain=1):
- def deepnet_init(gain=1):
+ init = jax.nn.initializers.normal(init_std)
- init = jax.nn.initializers.glorot_normal()
def _init(*args, **kwargs):
return gain * init(*args, **kwargs)
return _init
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {self.num_heads})."
)
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=self.bias,
dtype=self.dtype,
)
+ if self.config.use_deepnet_scaling:
+ gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
- gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
+ self.config
- self.config
+ )
- )
+ elif self.config.use_subln_init:
+ gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config)
self.q_proj = dense(
- kernel_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ kernel_init=jax.nn.initializers.normal(self.config.init_std)
- else jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
- kernel_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ kernel_init=jax.nn.initializers.normal(self.config.init_std)
- else jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
+ kernel_init=deepnet_init(self.config.init_std,</s>
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
# offset: 1
<s>
self.v_proj = dense(
+ kernel_init=deepnet_init(self.config.init_std, gain)
- kernel_init=deepnet_init(gain)
+ if (self.config.use_deepnet_scaling or self.config.use_subln_init)
- if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.out_proj = dense(
+ kernel_init=deepnet_init(self.config.init_std, gain)
- kernel_init=deepnet_init(gain)
+ if (self.config.use_deepnet_scaling or self.config.use_subln_init)
- if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std)
)
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.config.use_head_scale:
self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
)
if self.config.use_cosine_attention:
# TODO: try using a learnt scale, somehow it immediately diverges in my experiments
self.tau = self.config.tau_init
if self.config.use_swin_position_embeddings:
self.rel_bias = nn.Embed(
self.q_length,
self.k_length * self.num_heads,
- embedding_init=deepnet_init()
- if self.config.use_deepnet_scaling
+ embedding_init</s>
===========changed ref 3===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
# offset: 2
<s>.nn.initializers.normal(self.config.init_std),
- else jax.nn.initializers.normal(self.config.init_std),
)
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
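`deepnet_gain` above gives the DeepNet residual gain (`alpha`) and initialization gain (`beta`) as functions of encoder/decoder depth, and `deepnet_init` simply multiplies a `normal(init_std)` initializer by that gain. A worked example for an illustrative 12/12-layer configuration:

encoder_layers, decoder_layers = 12, 12  # illustrative depths

enc_alpha = 0.81 * (encoder_layers**4 * decoder_layers) ** 0.0625   # ~1.76 (residual gain)
enc_beta = 0.87 * (encoder_layers**4 * decoder_layers) ** -0.0625   # ~0.40 (init gain)
dec_alpha = (3 * decoder_layers) ** 0.25                            # ~2.45
dec_beta = (12 * decoder_layers) ** -0.25                           # ~0.29

So decoder residuals are summed as `residual * ~2.45 + sublayer_out`, while the affected projection kernels start with a correspondingly smaller standard deviation (~0.29 * init_std).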
|
dalle_mini.model.modeling/FFN.__call__
|
Modified
|
borisdayma~dalle-mini
|
90e39c4f4fafdbbf0d9b3d37074f7f6c9199e3c9
|
feat: add magneto
|
<0>:<add> if self.config.use_deepnet_scaling:
<add> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<del> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1>:<add> self.config
<del> self.config
<2>:<add> )
<del> )
<3>:<add> elif self.config.use_subln_init:
<add> gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config)
<add> if self.config.ln_positions in ["normformer", "cogview", "preln", "subln"]:
<del> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<14>:<add> kernel_init=deepnet_init(self.config.init_std, gain)
<del> kernel_init=deepnet_init(gain)
<15>:<add> if (self.config.use_deepnet_scaling or self.config.use_subln_init)
<del> if self.config.use_deepnet_scaling
<19>:<add> if self.config.ln_positions in ["normformer", "subln"]:
<del> if self.config.ln_positions in ["normformer"]:
<33>:<add> kernel_init=deepnet_init(self.config.init_
|
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
<0> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<1> self.config
<2> )
<3> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<4> x = norm(
<5> self.config.ln_type,
<6> dtype=self.dtype,
<7> epsilon=1e-05,
<8> use_scale=self.config.force_ln_scale,
<9> )(x)
<10> x = nn.Dense(
<11> self.ffn_dim,
<12> dtype=self.dtype,
<13> use_bias=self.config.use_bias,
<14> kernel_init=deepnet_init(gain)
<15> if self.config.use_deepnet_scaling
<16> else jax.nn.initializers.normal(self.config.init_std),
<17> )(x)
<18> x = ACT2FN[self.config.activation_function](x)
<19> if self.config.ln_positions in ["normformer"]:
<20> x = norm(
<21> self.config.ln_type,
<22> dtype=self.dtype,
<23> epsilon=1e-05,
<24> use_scale=self.config.force_ln_scale,
<25> )(x)
<26> x = nn.Dropout(rate=self.config.activation_dropout)(
<27> x, deterministic=deterministic
<28> )
<29> x = nn.Dense(
<30> self.embed_dim,
<31> dtype=self.dtype,
<32> use_bias=self.config.use_bias,
<33> kernel_init=deepnet_init(gain)
<34> if self.config.use_deepnet_scaling
<35> else jax.nn.initializers.normal(self.config.init_std),
<36> )(x)
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FFN
config: DalleBartConfig
ffn_dim: int
embed_dim: int
dtype: jnp.dtype = jnp.float32
is_encoder: bool = False
at: transformers.modeling_flax_utils
ACT2FN = {
"gelu": partial(nn.gelu, approximate=False),
"relu": nn.relu,
"silu": nn.swish,
"swish": nn.swish,
"gelu_new": partial(nn.gelu, approximate=True),
"quick_gelu": quick_gelu,
}
===========changed ref 0===========
# module: dalle_mini.model.modeling
# deepnet initialization
+ def deepnet_init(init_std, gain=1):
- def deepnet_init(gain=1):
+ init = jax.nn.initializers.normal(init_std)
- init = jax.nn.initializers.glorot_normal()
def _init(*args, **kwargs):
return gain * init(*args, **kwargs)
return _init
===========changed ref 1===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
+ if self.config.use_deepnet_scaling:
+ gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
- gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
+ self.config
- self.config
+ )
- )
+ elif self.config.use_subln_init:
+ gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config)
+ if self.config.ln_positions in ["normformer", "cogview", "preln", "subln"]:
- if self.config.ln_positions in ["normformer", "cogview", "preln"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
w = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=self.config.use_bias,
+ kernel_init=deepnet_init(self.config.init_std, gain)
- kernel_init=deepnet_init(gain)
+ if (self.config.use_deepnet_scaling or self.config.use_subln_init)
- if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
w = ACT2FN[self.config.activation_function](w)
v = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=self.config.use_bias,
+ kernel_init=deep</s>
===========changed ref 2===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>
dtype=self.dtype,
use_bias=self.config.use_bias,
+ kernel_init=deepnet_init(self.config.init_std, gain)
- kernel_init=deepnet_init(gain)
+ if (self.config.use_deepnet_scaling or self.config.use_subln_init)
- if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = w * v
+ if self.config.ln_positions in ["normformer", "subln"]:
- if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=self.config.use_bias,
+ kernel_init=deepnet_init(self.config.init_std, gain)
- kernel_init=deepnet_init(gain)
+ if (self.config.use_deepnet_scaling or self.config.use_subln_init)
- if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv</s>
===========changed ref 3===========
# module: dalle_mini.model.modeling
class GLU(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 2
<s> "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
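The GLU variant shown in the changed refs above gates one up-projection with the activation of another before projecting back down. Stripped of the configurable layer norms, dropout and scaled initializers, the core computation reduces to the stand-alone sketch below (`TinyGLU` is illustrative, not the class above):

import jax
import jax.numpy as jnp
import flax.linen as nn

class TinyGLU(nn.Module):
    ffn_dim: int
    embed_dim: int

    @nn.compact
    def __call__(self, x):
        w = nn.gelu(nn.Dense(self.ffn_dim, use_bias=False)(x))  # gate path through the activation
        v = nn.Dense(self.ffn_dim, use_bias=False)(x)           # linear value path
        return nn.Dense(self.embed_dim, use_bias=False)(w * v)  # gated product projected back

# usage: params = TinyGLU(ffn_dim=2048, embed_dim=512).init(jax.random.PRNGKey(0), jnp.ones((1, 4, 512)))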
|
dalle_mini.model.modeling/FlaxBartEncoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
90e39c4f4fafdbbf0d9b3d37074f7f6c9199e3c9
|
feat: add magneto
|
<11>:<add> if self.config.ln_positions in ["normformer", "cogview", "preln", "subln"]:
<del> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> if self.config.use_scan:
<1> hidden_states = hidden_states[0]
<2>
<3> res_gain = (
<4> deepnet_gain["encoder"]["alpha"](self.config)
<5> if self.config.use_deepnet_scaling
<6> else 1
<7> )
<8>
<9> embed_dim = self.config.d_model
<10> residual = hidden_states
<11> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<12> hidden_states = norm(
<13> self.config.ln_type,
<14> dtype=self.dtype,
<15> epsilon=1e-05,
<16> use_scale=self.config.force_ln_scale,
<17> )(hidden_states)
<18> hidden_states, attn_weights = FlaxBartAttention(
<19> config=self.config,
<20> embed_dim=embed_dim,
<21> num_heads=self.config.encoder_attention_heads,
<22> dropout=self.config.attention_dropout,
<23> bias=self.config.use_bias,
<24> dtype=self.dtype,
<25> is_encoder=True,
<26> q_length=self.config.max_text_length,
<27> k_length=self.config.max_text_length,
<28> )(hidden_states=hidden_states, attention_mask=attention_mask)
<29>
<30> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<31> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<32> hidden_states
<33> )</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm:
use_scale = self.use_scale or self.config.force_ln_scale
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
if self.config.use_scan:
outputs = (outputs, None)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: dalle_mini.model.modeling.FlaxBartEncoderLayer
config: DalleBartConfig
dtype: jnp.dtype = jnp.float32
add_norm: bool = False
use_scale: bool = True
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
# deepnet initialization
+ def deepnet_init(init_std, gain=1):
- def deepnet_init(gain=1):
+ init = jax.nn.initializers.normal(init_std)
- init = jax.nn.initializers.glorot_normal()
def _init(*args, **kwargs):
return gain * init(*args, **kwargs)
return _init
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
+ if self.config.use_deepnet_scaling:
+ gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
- gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
+ self.config
- self.config
+ )
- )
+ elif self.config.use_subln_init:
+ gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config)
+ if self.config.ln_positions in ["normformer", "cogview", "preln", "subln"]:
- if self.config.ln_positions in ["normformer", "cogview", "preln"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dense(
self.ffn_dim,
dtype=self.dtype,
use_bias=self.config.use_bias,
+ kernel_init=deepnet_init(self.config.init_std, gain)
- kernel_init=deepnet_init(gain)
+ if (self.config.use_deepnet_scaling or self.config.use_subln_init)
- if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
x = ACT2FN[self.config.activation_function](x)
+ if self.config.ln_positions in ["normformer", "subln"]:
- if self.config.ln_positions in ["normformer"]:
x = norm(
self.</s>
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FFN(nn.Module):
@nn.compact
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
# offset: 1
<s>n"]:
- if self.config.ln_positions in ["normformer"]:
x = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(x)
x = nn.Dropout(rate=self.config.activation_dropout)(
x, deterministic=deterministic
)
x = nn.Dense(
self.embed_dim,
dtype=self.dtype,
use_bias=self.config.use_bias,
+ kernel_init=deepnet_init(self.config.init_std, gain)
- kernel_init=deepnet_init(gain)
+ if (self.config.use_deepnet_scaling or self.config.use_subln_init)
- if self.config.use_deepnet_scaling
else jax.nn.initializers.normal(self.config.init_std),
)(x)
if self.config.ln_positions in ["swinv2", "cogview"]:
x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
return x
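The various `ln_positions` branches in the encoder layer above only move LayerNorm relative to the residual addition. A schematic of the two base placements, with the DeepNet residual gain omitted for brevity (`sublayer` stands in for the attention or feed-forward block):

import jax.numpy as jnp

def layer_norm(x, eps=1e-5):
    mu = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return (x - mu) / jnp.sqrt(var + eps)

def pre_ln(x, sublayer):
    # "preln" / "normformer" / "subln": normalize the sub-layer input, residual outside
    return x + sublayer(layer_norm(x))

def post_ln(x, sublayer):
    # "postln": residual add first, then normalize the sum
    return layer_norm(x + sublayer(x))

"normformer" and "subln" additionally insert a norm inside the sub-layer (after the attention output or the FFN activation), which is what the extra `norm(...)` calls in the code above do.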
|
dalle_mini.model.modeling/FlaxBartAttention.setup
|
Modified
|
borisdayma~dalle-mini
|
12c49e10cb33fe9d5149955a6433153172ae8145
|
fix: remove magneto cross-attn's ln and init (#311)
|
<18>:<add> elif self.config.use_subln_init and not self.is_cross_attention:
<del> elif self.config.use_subln_init:
<29>:<add> if (
<add> self.config.use_deepnet_scaling
<add> or (self.config.use_subln_init and not self.is_cross_attention)
<add> )
<del> if (self.config.use_deepnet_scaling or self.config.use_subln_init)
<34>:<add> if (
<add> self.config.use_deepnet_scaling
<add> or (self.config.use_subln_init and not self.is_cross_attention)
<add> )
<del> if (self.config.use_deepnet_scaling or self.config.use_s
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
<0> self.head_dim = self.embed_dim // self.num_heads
<1> if self.head_dim * self.num_heads != self.embed_dim:
<2> raise ValueError(
<3> f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
<4> f" and `num_heads`: {self.num_heads})."
<5> )
<6>
<7> dense = partial(
<8> nn.Dense,
<9> self.embed_dim,
<10> use_bias=self.bias,
<11> dtype=self.dtype,
<12> )
<13>
<14> if self.config.use_deepnet_scaling:
<15> gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
<16> self.config
<17> )
<18> elif self.config.use_subln_init:
<19> gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config)
<20>
<21> self.q_proj = dense(
<22> kernel_init=jax.nn.initializers.normal(self.config.init_std)
<23> )
<24> self.k_proj = dense(
<25> kernel_init=jax.nn.initializers.normal(self.config.init_std)
<26> )
<27> self.v_proj = dense(
<28> kernel_init=deepnet_init(self.config.init_std, gain)
<29> if (self.config.use_deepnet_scaling or self.config.use_subln_init)
<30> else jax.nn.initializers.normal(self.config.init_std)
<31> )
<32> self.out_proj = dense(
<33> kernel_init=deepnet_init(self.config.init_std, gain)
<34> if (self.config.use_deepnet_scaling or self.config.use_s</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
# offset: 1
else jax.nn.initializers.normal(self.config.init_std)
)
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.config.use_head_scale:
self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
)
if self.config.use_cosine_attention:
# TODO: try using a learnt scale, somehow it immediately diverges in my experiments
self.tau = self.config.tau_init
if self.config.use_swin_position_embeddings:
self.rel_bias = nn.Embed(
self.q_length,
self.k_length * self.num_heads,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
if self.config.ln_positions in ["subln"]:
self.mid_layernorm = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_init(init_std, gain=1)
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
subln_gain = {
"encoder": lambda config: math.sqrt(
1.0
/ 3.0
* math.log(3 * config.decoder_layers)
* math.log(2 * config.encoder_layers)
),
"decoder": lambda config: math.sqrt(math.log(3 * config.decoder_layers)),
}
at: dalle_mini.model.modeling.FlaxBartAttention
is_encoder: bool = False
is_cross_attention: bool = False
q_length: int = None
k_length: int = None
at: functools
partial(func: Callable[..., _T], *args: Any, **kwargs: Any)
partial(func, *args, **keywords, /) -> function with partial application()
at: transformers.models.bart.configuration_bart.BartConfig.__init__
self.init_std = init_std
at: transformers.models.bart.modeling_flax_bart.FlaxBartAttention
config: BartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
===========unchanged ref 1===========
setup(self) -> None
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
- scale attention heads per NormFormer paper
"""
is_encoder: bool = False
+ is_cross_attention: bool = False
q_length: int = None
k_length: int = None
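`subln_gain` above is the Magneto/Sub-LN initialization gain; like the DeepNet beta it multiplies the `normal(init_std)` initializer of the value and output projections, and per this fix it is no longer applied to cross-attention. For illustrative 12/12-layer depths:

import math

encoder_layers, decoder_layers = 12, 12  # illustrative depths

enc_gain = math.sqrt(
    1.0 / 3.0 * math.log(3 * decoder_layers) * math.log(2 * encoder_layers)
)  # ~1.95
dec_gain = math.sqrt(math.log(3 * decoder_layers))  # ~1.89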
|
dalle_mini.model.modeling/FlaxBartAttention.__call__
|
Modified
|
borisdayma~dalle-mini
|
12c49e10cb33fe9d5149955a6433153172ae8145
|
fix: remove magneto cross-attn's ln and init (#311)
|
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> """Input shape: Batch x Time x Channel"""
<1>
<2> # if key_value_states are provided this layer is used as a cross-attention layer
<3> # for the decoder
<4> is_cross_attention = key_value_states is not None
<5> batch_size = hidden_states.shape[0]
<6>
<7> # get query proj
<8> query_states = self.q_proj(hidden_states)
<9> # get key, value proj
<10> if is_cross_attention:
<11> # cross_attentions
<12> key_states = self.k_proj(key_value_states)
<13> value_states = self.v_proj(key_value_states)
<14> else:
<15> # self_attention
<16> key_states = self.k_proj(hidden_states)
<17> value_states = self.v_proj(hidden_states)
<18>
<19> query_states = self._split_heads(query_states)
<20> key_states = self._split_heads(key_states)
<21> value_states = self._split_heads(value_states)
<22>
<23> # handle cache prepare causal attention mask
<24> if self.causal:
<25> query_length, key_length = query_states.shape[1], key_states.shape[1]
<26> if self.has_variable("cache", "cached_key"):
<27> mask_shift = self.variables["cache"]["cache_index"]
<28> max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
<29> causal_mask = lax.dynamic_slice(
</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
(0, 0, mask_shift, 0),
(1, 1, query_length, max_decoder_length),
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(
causal_mask, (batch_size,) + causal_mask.shape[1:]
)
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(
jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> # Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, -jnp.inf).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.use_cosine_attention:
# normalize q and k
query_states = query_states / (
jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
)
key_states = key_states / (
jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
)
# relative position embeddings
if self.config.use_swin_position_embeddings:
position_ids = jnp.arange(self.q_length)
embed_pos = self.rel_bias(position_ids)
embed_pos = rearrange(embed_pos, "q (k h) -> 1 h q k", h=self.num_heads)
else:
</s>
===========below chunk 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>_pos = None
tau = self.tau if self.config.use_cosine_attention else None
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
mask=attention_mask,
embed_pos=embed_pos,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
sinkhorn_iters=self.config.sinkhorn_iters,
is_encoder=self.is_encoder,
tau=tau,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
if self.config.use_head_scale:
# per Normformer
attn_output = attn_output * self.head_scale
attn_output = self._merge_heads(attn_output)
if self.config.ln_positions in ["subln"]:
attn_output = self.mid_layernorm(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
===========unchanged ref 0===========
at: dalle_mini.model.modeling
norm(type, *args, **kwargs)
dot_product_attention_weights(query: Any, key: Any, bias: Optional[Any]=None, mask: Optional[Any]=None, embed_pos: Optional[Any]=None, broadcast_dropout: bool=True, dropout_rng: Optional[PRNGKey]=None, dropout_rate: float=0.0, deterministic: bool=False, dtype: Any=jnp.float32, precision: PrecisionLike=None, sinkhorn_iters: int=1, is_encoder: bool=False, tau=None)
at: dalle_mini.model.modeling.FlaxBartAttention
is_cross_attention: bool = False
q_length: int = None
at: dalle_mini.model.modeling.FlaxBartAttention.setup
self.q_proj = dense(
kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(self.config.init_std, gain)
if (
self.config.use_deepnet_scaling
or (self.config.use_subln_init and not self.is_cross_attention)
)
else jax.nn.initializers.normal(self.config.init_std)
)
self.tau = self.config.tau_init
self.rel_bias = nn.Embed(
self.q_length,
self.k_length * self.num_heads,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
|
|
dalle_mini.model.modeling/FlaxBartEncoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
12c49e10cb33fe9d5149955a6433153172ae8145
|
fix: remove magneto cross-attn's ln and init (#311)
|
<26>:<add> is_cross_attention=False,
|
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> if self.config.use_scan:
<1> hidden_states = hidden_states[0]
<2>
<3> res_gain = (
<4> deepnet_gain["encoder"]["alpha"](self.config)
<5> if self.config.use_deepnet_scaling
<6> else 1
<7> )
<8>
<9> embed_dim = self.config.d_model
<10> residual = hidden_states
<11> if self.config.ln_positions in ["normformer", "cogview", "preln", "subln"]:
<12> hidden_states = norm(
<13> self.config.ln_type,
<14> dtype=self.dtype,
<15> epsilon=1e-05,
<16> use_scale=self.config.force_ln_scale,
<17> )(hidden_states)
<18> hidden_states, attn_weights = FlaxBartAttention(
<19> config=self.config,
<20> embed_dim=embed_dim,
<21> num_heads=self.config.encoder_attention_heads,
<22> dropout=self.config.attention_dropout,
<23> bias=self.config.use_bias,
<24> dtype=self.dtype,
<25> is_encoder=True,
<26> q_length=self.config.max_text_length,
<27> k_length=self.config.max_text_length,
<28> )(hidden_states=hidden_states, attention_mask=attention_mask)
<29>
<30> if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
<31> hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
<32> hidden_</s>
|
===========below chunk 0===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self.config.encoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=True,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm:
use_scale = self.use_scale or self.config.force_ln_scale
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
if self.config.</s>
===========below chunk 1===========
# module: dalle_mini.model.modeling
class FlaxBartEncoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s>,)
if output_attentions:
outputs += (attn_weights,)
if self.config.use_scan:
outputs = (outputs, None)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
- scale attention heads per NormFormer paper
"""
is_encoder: bool = False
+ is_cross_attention: bool = False
q_length: int = None
k_length: int = None
===========changed ref 1===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {self.num_heads})."
)
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=self.bias,
dtype=self.dtype,
)
if self.config.use_deepnet_scaling:
gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
self.config
)
+ elif self.config.use_subln_init and not self.is_cross_attention:
- elif self.config.use_subln_init:
gain = subln_gain["encoder" if self.is_encoder else "decoder"](self.config)
self.q_proj = dense(
kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.k_proj = dense(
kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.v_proj = dense(
kernel_init=deepnet_init(self.config.init_std, gain)
+ if (
+ self.config.use_deepnet_scaling
+ or (self.config.use_subln_init and not self.is_cross_attention)
+ )
- if (self.config.use_deepnet_scaling or self.config.use_subln_init)
else jax.nn.initializers.normal(self.config.init_std)
)
self.out_proj = dense(
kernel</s>
===========changed ref 2===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
def setup(self) -> None:
# offset: 1
<s>.initializers.normal(self.config.init_std)
)
self.out_proj = dense(
kernel_init=deepnet_init(self.config.init_std, gain)
+ if (
+ self.config.use_deepnet_scaling
+ or (self.config.use_subln_init and not self.is_cross_attention)
+ )
- if (self.config.use_deepnet_scaling or self.config.use_subln_init)
else jax.nn.initializers.normal(self.config.init_std)
)
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.config.use_head_scale:
self.head_scale = self.param(
"head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
)
if self.config.use_cosine_attention:
# TODO: try using a learnt scale, somehow it immediately diverges in my experiments
self.tau = self.config.tau_init
if self.config.use_swin_position_embeddings:
self.rel_bias = nn.Embed(
self.q_length,
self.k_length * self.num_heads,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
if self.causal:
# used only in decoder
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
)
+ if self.config.ln_positions in ["subln</s>
|
dalle_mini.model.modeling/FlaxBartDecoderLayer.__call__
|
Modified
|
borisdayma~dalle-mini
|
12c49e10cb33fe9d5149955a6433153172ae8145
|
fix: remove magneto cross-attn's ln and init (#311)
|
<29>:<add> is_cross_attention=False,
|
<s> FlaxBartDecoderLayer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
<0> if self.config.use_scan:
<1> hidden_states = hidden_states[0]
<2>
<3> res_gain = (
<4> deepnet_gain["decoder"]["alpha"](self.config)
<5> if self.config.use_deepnet_scaling
<6> else 1
<7> )
<8>
<9> embed_dim = self.config.d_model
<10> residual = hidden_states
<11>
<12> # Self Attention
<13> if self.config.ln_positions in ["normformer", "cogview", "preln"]:
<14> hidden_states = norm(
<15> self.config.ln_type,
<16> dtype=self.dtype,
<17> epsilon=1e-05,
<18> use_scale=self.config.force_ln_scale,
<19> )(hidden_states)
<20> hidden_states, attn_weights = FlaxBartAttention(
<21> config=self.config,
<22> embed_dim=embed_dim,
<23> num_heads=self.config.decoder_attention_heads,
<24> dropout=self.config.attention_dropout,
<25> causal=True,
<26> bias=self.config.use_bias,
<27> dtype=self.dtype,
<28> is_encoder=False,
<29> q_length=self.config.image_length,
<30> k_length=self.config.image_length,
<31> )(
<32> hidden_states=hidden_states,
<33> attention_mask=attention_mask,
<34> init_cache=init_cache,
<35> )
<36> </s>
|
===========below chunk 0===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 1
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
hidden_states
)
# Cross Attention
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
if self.config.ln_positions in ["normformer", "cogview", "preln"]:
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=self.config.force_ln_scale,
)(hidden_states)
hidden_states, cross_attn_weights = FlaxBartAttention(
config=self.config,
embed_dim=embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
bias=self.config.use_bias,
dtype=self.dtype,
is_encoder=False,
q_length=self.config.image_length,
k_length=self.config.max_text_length,
</s>
===========below chunk 1===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 2
<s> q_length=self.config.image_length,
k_length=self.config.max_text_length,
)(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
hidden_states = nn.Dropout(rate=self.config.dropout)(
hidden_states, deterministic=deterministic
)
hidden_states = residual * res_gain + hidden_states
if self.config.ln_positions in ["postln"]:
hidden_states = norm(
self.config.ln_type, dtype=self.dtype, epsilon=1e-05
)(hidden_states)
# Feed forward
residual = hidden_states
ff_block = (
GLU(
config=self.config,
ffn_dim=self.config.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
if self.config.use_glu
else FFN(
config=self.config,
ffn_dim=self</s>
===========below chunk 2===========
<s>Layer(nn.Module):
@nn.compact
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
# offset: 3
<s>.decoder_ffn_dim,
embed_dim=embed_dim,
dtype=self.dtype,
is_encoder=False,
)
)
hidden_states = ff_block(hidden_states, deterministic=deterministic)
hidden_states = residual * res_gain + hidden_states
if self.add_norm:
use_scale = self.use_scale or self.config.force_ln_scale
hidden_states = norm(
self.config.ln_type,
dtype=self.dtype,
epsilon=1e-05,
use_scale=use_scale,
)(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights, cross_attn_weights)
if self.config.use_scan:
outputs = (outputs, None)
return outputs
===========unchanged ref 0===========
at: dalle_mini.model.modeling
deepnet_gain = {
"encoder": {
"alpha": lambda config: 0.81
* (config.encoder_layers**4 * config.decoder_layers) ** 0.0625,
"beta": lambda config: 0.87
* (config.encoder_layers**4 * config.decoder_layers) ** -0.0625,
},
"decoder": {
"alpha": lambda config: (3 * config.decoder_layers) ** 0.25,
"beta": lambda config: (12 * config.decoder_layers) ** -0.25,
},
}
norm(type, *args, **kwargs)
at: typing
Tuple = _TupleType(tuple, -1, inst=False, name='Tuple')
===========changed ref 0===========
# module: dalle_mini.model.modeling
class FlaxBartAttention(FlaxBartAttention):
"""
Edits:
- causal mask is used only in decoder and considers image_length
- scale attention heads per NormFormer paper
"""
is_encoder: bool = False
+ is_cross_attention: bool = False
q_length: int = None
k_length: int = None
|
aioquic.packet/QuicHeader.parse
|
Modified
|
aiortc~aioquic
|
3167b70e32c9e83ef7759a38e6328e4e70ad3708
|
[crypto] add encrypt_packet function
|
<5>:<add> if is_long_header(first_byte):
<del> if first_byte & PACKET_LONG_HEADER:
|
# module: aioquic.packet
@dataclass
class QuicHeader:
@classmethod
def parse(cls, data):
<0> datagram_length = len(data)
<1> if datagram_length < 2:
<2> raise ValueError('Packet is too short (%d bytes)' % datagram_length)
<3>
<4> first_byte = data[0]
<5> if first_byte & PACKET_LONG_HEADER:
<6> if datagram_length < 6:
<7> raise ValueError('Long header is too short (%d bytes)' % datagram_length)
<8>
<9> version, cid_lengths = unpack_from('!LB', data, 1)
<10> pos = 6
<11>
<12> destination_cid_length = decode_cid_length(cid_lengths // 16)
<13> destination_cid = data[pos:pos + destination_cid_length]
<14> pos += destination_cid_length
<15>
<16> source_cid_length = decode_cid_length(cid_lengths % 16)
<17> source_cid = data[pos:pos + source_cid_length]
<18> pos += source_cid_length
<19>
<20> packet_type = first_byte & PACKET_TYPE_MASK
<21> if packet_type == PACKET_TYPE_INITIAL:
<22> token_length, pos = unpack_variable_length(data, pos)
<23> token = data[pos:pos + token_length]
<24> pos += token_length
<25>
<26> length, pos = unpack_variable_length(data, pos)
<27> else:
<28> raise ValueError('Long header packet type 0x%x is not supported' % packet_type)
<29>
<30> return QuicHeader(
<31> version=version,
<32> destination_cid=destination_cid,
<33> source_cid=source_cid,
<34> token=token)
<35> else:
<36> # short header packet
<37> raise ValueError('Short header is not supported yet')
<38>
|
===========unchanged ref 0===========
at: _struct
unpack_from(format, /, buffer, offset=0)
at: aioquic.packet
PACKET_TYPE_INITIAL = PACKET_LONG_HEADER | PACKET_FIXED_BIT | 0x00
PACKET_TYPE_MASK = 0xf0
decode_cid_length(length)
is_long_header(first_byte)
unpack_variable_length(data, pos=0)
QuicHeader(version: int, destination_cid: bytes, source_cid: bytes, token: bytes=b'')
at: aioquic.packet.QuicHeader
version: int
===========changed ref 0===========
# module: aioquic.packet
+ def is_long_header(first_byte):
+ return bool(first_byte & PACKET_LONG_HEADER)
+
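`unpack_variable_length` follows QUIC's variable-length integer encoding: the two most significant bits of the first byte give the encoded length (1, 2, 4 or 8 bytes) and the remaining bits start the big-endian value. A hedged re-implementation with the same `(value, new_pos)` return shape as the signature shown above:

def decode_varint(data, pos=0):
    first = data[pos]
    length = 1 << (first >> 6)      # 1, 2, 4 or 8 bytes
    value = first & 0x3F            # strip the two length bits
    for b in data[pos + 1:pos + length]:
        value = (value << 8) | b
    return value, pos + length

assert decode_varint(b'\x40\x25') == (37, 2)  # two-byte encoding of 37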
|
aioquic.crypto/encrypt_packet
|
Modified
|
aiortc~aioquic
|
d4983e27c5c2436be7aa820b24a45dbda38e0815
|
[crypto] add decrypt_packet function
|
<12>:<add> sample_offset = MAX_PN_SIZE - pn_length
<del> sample_offset = 4 - pn_length
|
# module: aioquic.crypto
def encrypt_packet(key, iv, hp, plain_header, plain_payload):
<0> pn_length = (plain_header[0] & 0x03) + 1
<1> pn_offset = len(plain_header) - pn_length
<2> pn = plain_header[pn_offset:pn_offset + pn_length]
<3>
<4> # payload protection
<5> nonce = bytearray(len(iv) - pn_length) + bytearray(pn)
<6> for i in range(len(iv)):
<7> nonce[i] ^= iv[i]
<8> aesgcm = aead.AESGCM(key)
<9> protected_payload = aesgcm.encrypt(nonce, plain_payload, plain_header)
<10>
<11> # header protection
<12> sample_offset = 4 - pn_length
<13> sample = protected_payload[sample_offset:sample_offset + 16]
<14> cipher = Cipher(algorithms.AES(hp), modes.ECB(), backend=backend)
<15> encryptor = cipher.encryptor()
<16> buf = bytearray(31)
<17> encryptor.update_into(sample, buf)
<18> mask = buf[:5]
<19>
<20> packet = bytearray(plain_header + protected_payload)
<21> if is_long_header(packet[0]):
<22> # long header
<23> packet[0] ^= (mask[0] & 0x0f)
<24> else:
<25> # short header
<26> packet[0] ^= (mask[0] & 0x1f)
<27>
<28> for i in range(pn_length):
<29> packet[pn_offset + i] ^= mask[1 + i]
<30>
<31> return packet
<32>
|
===========unchanged ref 0===========
at: aioquic.crypto
backend = default_backend()
MAX_PN_SIZE = 4
at: aioquic.packet
is_long_header(first_byte)
===========changed ref 0===========
# module: aioquic.crypto
algorithm = hashes.SHA256()
backend = default_backend()
INITIAL_SALT = binascii.unhexlify('ef4fb0abb47470c41befcf8031334fae485e09a0')
+ MAX_PN_SIZE = 4
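The header-protection step in `encrypt_packet` samples 16 bytes of ciphertext, encrypts the sample with AES-ECB under the `hp` key, and uses the first five bytes as a mask over the flags byte and packet number. For a single 16-byte block, the `update_into` / `bytearray(31)` sequence above is functionally equivalent to this sketch:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def header_protection_mask(hp_key, sample):
    # sample: 16 bytes of ciphertext taken at sample_offset
    encryptor = Cipher(algorithms.AES(hp_key), modes.ECB(), backend=default_backend()).encryptor()
    return (encryptor.update(sample) + encryptor.finalize())[:5]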
|
tests.test_crypto/CryptoTest.test_encrypt_packet_client
|
Modified
|
aiortc~aioquic
|
d4983e27c5c2436be7aa820b24a45dbda38e0815
|
[crypto] add decrypt_packet function
|
<3>:<del> plain_header = binascii.unhexlify('c3ff000012508394c8f03e51570800449f00000002')
<4>:<del> plain_payload = binascii.unhexlify(
<5>:<del> '060040c4010000c003036660261ff947cea49cce6cfad687f457cf1b14531ba1'
<6>:<del> '4131a0e8f309a1d0b9c4000006130113031302010000910000000b0009000006'
<7>:<del> '736572766572ff01000100000a00140012001d00170018001901000101010201'
<8>:<del> '03010400230000003300260024001d00204cfdfcd178b784bf328cae793b136f'
<9>:<del> '2aedce005ff183d7bb1495207236647037002b0003020304000d0020001e0403'
<10>:<del> '05030603020308040805080604010501060102010402050206020202002d0002'
<11>:<del> '0101001c00024001') + bytes(963)
<12>:<add> packet = encrypt_packet(key, iv, hp, CLIENT_PLAIN_HEADER, CLIENT_PLAIN_PAYLOAD)
<add> self.assertEqual(packet, CLIENT_ENCRYPTED_PACKET)
<13>:<del> packet = encrypt_packet(key, iv, hp, plain_header, plain_payload)
<14>:<del>
<15>:<del> self.assertEqual(packet, binascii.unhexlify(
<16>:<del> 'c1ff000012508394c8f03e51570800449f0dbc195a0000f3a694c75775b4e546'
<17>:<del> '172ce9e047cd0b5bee5181648c727adc87f7eae54473ec6cba6bdad4f5982317'
<18>:<del> '4
|
# module: tests.test_crypto
class CryptoTest(TestCase):
def test_encrypt_packet_client(self):
<0> cid = binascii.unhexlify('8394c8f03e515708')
<1> key, iv, hp = derive_keying_material(cid, is_client=True)
<2>
<3> plain_header = binascii.unhexlify('c3ff000012508394c8f03e51570800449f00000002')
<4> plain_payload = binascii.unhexlify(
<5> '060040c4010000c003036660261ff947cea49cce6cfad687f457cf1b14531ba1'
<6> '4131a0e8f309a1d0b9c4000006130113031302010000910000000b0009000006'
<7> '736572766572ff01000100000a00140012001d00170018001901000101010201'
<8> '03010400230000003300260024001d00204cfdfcd178b784bf328cae793b136f'
<9> '2aedce005ff183d7bb1495207236647037002b0003020304000d0020001e0403'
<10> '05030603020308040805080604010501060102010402050206020202002d0002'
<11> '0101001c00024001') + bytes(963)
<12>
<13> packet = encrypt_packet(key, iv, hp, plain_header, plain_payload)
<14>
<15> self.assertEqual(packet, binascii.unhexlify(
<16> 'c1ff000012508394c8f03e51570800449f0dbc195a0000f3a694c75775b4e546'
<17> '172ce9e047cd0b5bee5181648c727adc87f7eae54473ec6cba6bdad4f5982317'
<18> '4</s>
|
===========below chunk 0===========
# module: tests.test_crypto
class CryptoTest(TestCase):
def test_encrypt_packet_client(self):
# offset: 1
'8b5c88b9fd9279ffff3b0f4ecf95c4624db6d65d4113329ee9b0bf8cdd7c8a8d'
'72806d55df25ecb66488bc119d7c9a29abaf99bb33c56b08ad8c26995f838bb3'
'b7a3d5c1858b8ec06b839db2dcf918d5ea9317f1acd6b663cc8925868e2f6a1b'
'da546695f3c3f33175944db4a11a346afb07e78489e509b02add51b7b203eda5'
'c330b03641179a31fbba9b56ce00f3d5b5e3d7d9c5429aebb9576f2f7eacbe27'
'bc1b8082aaf68fb69c921aa5d33ec0c8510410865a178d86d7e54122d55ef2c2'
'bbc040be46d7fece73fe8a1b24495ec160df2da9b20a7ba2f26dfa2a44366dbc'
'63de5cd7d7c94c57172fe6d79c901f025c0010b02c89b395402c009f62dc053b'
'8067a1e0ed0a1e0cf5087d7f78cbd94afe0c3dd55d2d4b1a5cfe2b68b86264e3'
'51d1dcd858783a240f893f008ceed743d969b8f735a1677ead960b1fb1ecc5ac'
'83c273b</s>
===========below chunk 1===========
# module: tests.test_crypto
class CryptoTest(TestCase):
def test_encrypt_packet_client(self):
# offset: 2
<s>d969b8f735a1677ead960b1fb1ecc5ac'
'83c273b49288d02d7286207e663c45e1a7baf50640c91e762941cf380ce8d79f'
'3e86767fbbcd25b42ef70ec334835a3a6d792e170a432ce0cb7bde9aaa1e7563'
'7c1c34ae5fef4338f53db8b13a4d2df594efbfa08784543815c9c0d487bddfa1'
'539bc252cf43ec3686e9802d651cfd2a829a06a9f332a733a4a8aed80efe3478'
'093fbc69c8608146b3f16f1a5c4eac9320da49f1afa5f538ddecbbe7888f4355'
'12d0dd74fd9b8c99e3145ba84410d8ca9a36dd884109e76e5fb8222a52e1473d'
'a168519ce7a8a3c32e9149671b16724c6c5c51bb5cd64fb591e567fb78b10f9f'
'6fee62c276f282a7df6bcf7c17747bc9a81e6c9c3b032fdd0e1c3ac9eaa5077d'
'e3ded18b2ed4faf328f49875af2e36ad5ce5f6cc99ef4b60e57b3b5b</s>
===========below chunk 2===========
# module: tests.test_crypto
class CryptoTest(TestCase):
def test_encrypt_packet_client(self):
# offset: 3
<s>9fcbcd'
'4cfb3975e70ce4c2506bcd71fef0e53592461504e3d42c885caab21b782e2629'
'4c6a9d61118cc40a26f378441ceb48f31a362bf8502a723a36c63502229a462c'
'c2a3796279a5e3a7f81a68c7f81312c381cc16a4ab03513a51ad5b54306ec1d7'
'8a5e47e2b15e5b7a1438e5b8b2882dbdad13d6a4a8c3558cae043501b68eb3b0'
'40067152337c051c40b5af809aca2856986fd1c86a4ade17d254b6262ac1bc07'
'7343b52bf89fa27d73e3c6f3118c9961f0bebe68a5c323c2d84b8c29a2807df6'
'63635223242a2ce9828d4429ac270aab5f1841e8e49cf433b1547989f419caa3'
'c758fff96ded40cf3427f0761b678daa1a9e5554465d46b7a917493fc70f9ec5'
'e4e5d786ca501730898aaa1151dcd31829641e29428d90e6065511c24d3109f7'
'cba32225d4accfc54fec42</s>
===========below chunk 3===========
# module: tests.test_crypto
class CryptoTest(TestCase):
def test_encrypt_packet_client(self):
# offset: 4
<s>33f9585252ee36fa5ea0c656934385b468eee245'
'315146b8c047ed27c519b2c0a52d33efe72c186ffe0a230f505676c5324baa6a'
'e006a73e13aa8c39ab173ad2b2778eea0b34c46f2b3beae2c62a2c8db238bf58'
'fc7c27bdceb96c56d29deec87c12351bfd5962497418716a4b915d334ffb5b92'
'ca94ffe1e4f78967042638639a9de325357f5f08f6435061e5a274703936c06f'
'c56af92c420797499ca431a7abaa461863bca656facfad564e6274d4a741033a'
'ca1e31bf63200df41cdf41c10b912bec'))
===========unchanged ref 0===========
at: aioquic.crypto
derive_keying_material(cid, is_client)
at: binascii
unhexlify(hexstr: _Ascii, /) -> bytes
at: unittest.case
TestCase(methodName: str=...)
at: unittest.case.TestCase
failureException: Type[BaseException]
longMessage: bool
maxDiff: Optional[int]
_testMethodName: str
_testMethodDoc: str
assertEqual(first: Any, second: Any, msg: Any=...) -> None
|
tests.test_crypto/CryptoTest.test_encrypt_packet_server
|
Modified
|
aiortc~aioquic
|
d4983e27c5c2436be7aa820b24a45dbda38e0815
|
[crypto] add decrypt_packet function
|
<3>:<del> plain_header = binascii.unhexlify('c1ff00001205f067a5502a4262b50040740001')
<4>:<del> plain_payload = binascii.unhexlify(
<5>:<del> '0d0000000018410a020000560303eefce7f7b37ba1d1632e96677825ddf73988'
<6>:<del> 'cfc79825df566dc5430b9a045a1200130100002e00330024001d00209d3c940d'
<7>:<del> '89690b84d08a60993c144eca684d1081287c834d5311bcf32bb9da1a002b0002'
<8>:<del> '0304')
<9>:<add> packet = encrypt_packet(key, iv, hp, SERVER_PLAIN_HEADER, SERVER_PLAIN_PAYLOAD)
<add> self.assertEqual(packet, SERVER_ENCRYPTED_PACKET)
<10>:<del> packet = encrypt_packet(key, iv, hp, plain_header, plain_payload)
<11>:<del>
<12>:<del> self.assertEqual(packet, binascii.unhexlify(
<13>:<del> 'c4ff00001205f067a5502a4262b5004074f7ed5f01c4c2a2303d297e3c519bf6'
<14>:<del> 'b22386e3d0bd6dfc66121677298031041bb9a79c9f0f9d4c5877270a660f5da3'
<15>:<del> '6207d98b73839b2fdf2ef8e7df5a51b17b8c68d864fd3e708c6c1b71a98a3318'
<16>:<del> '15599ef5014ea38c44bdfd387c03b5275c35e009b6238f831420047c7271281c'
<17>:<del> 'cb54df7884'))
<18>:<del>
|
# module: tests.test_crypto
class CryptoTest(TestCase):
def test_encrypt_packet_server(self):
<0> cid = binascii.unhexlify('8394c8f03e515708')
<1> key, iv, hp = derive_keying_material(cid, is_client=False)
<2>
<3> plain_header = binascii.unhexlify('c1ff00001205f067a5502a4262b50040740001')
<4> plain_payload = binascii.unhexlify(
<5> '0d0000000018410a020000560303eefce7f7b37ba1d1632e96677825ddf73988'
<6> 'cfc79825df566dc5430b9a045a1200130100002e00330024001d00209d3c940d'
<7> '89690b84d08a60993c144eca684d1081287c834d5311bcf32bb9da1a002b0002'
<8> '0304')
<9>
<10> packet = encrypt_packet(key, iv, hp, plain_header, plain_payload)
<11>
<12> self.assertEqual(packet, binascii.unhexlify(
<13> 'c4ff00001205f067a5502a4262b5004074f7ed5f01c4c2a2303d297e3c519bf6'
<14> 'b22386e3d0bd6dfc66121677298031041bb9a79c9f0f9d4c5877270a660f5da3'
<15> '6207d98b73839b2fdf2ef8e7df5a51b17b8c68d864fd3e708c6c1b71a98a3318'
<16> '15599ef5014ea38c44bdfd387c03b5275c35e009b6238f831420047c7271281c'
<17> 'cb54df7884'))
<18>
|
===========unchanged ref 0===========
at: aioquic.crypto
derive_keying_material(cid, is_client)
decrypt_packet(key, iv, hp, packet, encrypted_offset)
at: binascii
unhexlify(hexstr: _Ascii, /) -> bytes
at: tests.test_crypto
CLIENT_PLAIN_HEADER = binascii.unhexlify('c3ff000012508394c8f03e51570800449f00000002')
CLIENT_PLAIN_PAYLOAD = binascii.unhexlify(
'060040c4010000c003036660261ff947cea49cce6cfad687f457cf1b14531ba1'
'4131a0e8f309a1d0b9c4000006130113031302010000910000000b0009000006'
'736572766572ff01000100000a00140012001d00170018001901000101010201'
'03010400230000003300260024001d00204cfdfcd178b784bf328cae793b136f'
'2aedce005ff183d7bb1495207236647037002b0003020304000d0020001e0403'
'05030603020308040805080604010501060102010402050206020202002d0002'
'0101001c00024001') + bytes(963)
===========unchanged ref 1===========
CLIENT_ENCRYPTED_PACKET = binascii.unhexlify(
'c1ff000012508394c8f03e51570800449f0dbc195a0000f3a694c75775b4e546'
'172ce9e047cd0b5bee5181648c727adc87f7eae54473ec6cba6bdad4f5982317'
'4b769f12358abd292d4f3286934484fb8b239c38732e1f3bbbc6a003056487eb'
'8b5c88b9fd9279ffff3b0f4ecf95c4624db6d65d4113329ee9b0bf8cdd7c8a8d'
'72806d55df25ecb66488bc119d7c9a29abaf99bb33c56b08ad8c26995f838bb3'
'b7a3d5c1858b8ec06b839db2dcf918d5ea9317f1acd6b663cc8925868e2f6a1b'
'da546695f3c3f33175944db4a11a346afb07e78489e509b02add51b7b203eda5'
'c330b03641179a31fbba9b56ce00f3d5b5e3d7d9c5429aebb9576f2f7eacbe27'
'bc1b8082aaf68fb69c921aa5d33ec0c8510410865a178d86d7e54122d55ef2c2'
'bbc040be46d7fece73fe8a1b24495ec160df2da9b20a7ba2f26dfa2a44366dbc'
'63de5cd7d7c94c57172fe6d79c901f025c0010b</s>
===========unchanged ref 2===========
at: unittest.case.TestCase
assertEqual(first: Any, second: Any, msg: Any=...) -> None
===========changed ref 0===========
# module: aioquic.crypto
+ def decrypt_packet(key, iv, hp, packet, encrypted_offset):
+ packet = bytearray(packet)
+
+ # header protection
+ sample_offset = encrypted_offset + MAX_PN_SIZE
+ sample = packet[sample_offset:sample_offset + 16]
+ cipher = Cipher(algorithms.AES(hp), modes.ECB(), backend=backend)
+ encryptor = cipher.encryptor()
+ buf = bytearray(31)
+ encryptor.update_into(sample, buf)
+ mask = buf[:5]
+
+ if is_long_header(packet[0]):
+ # long header
+ packet[0] ^= (mask[0] & 0x0f)
+ else:
+ # short header
+ packet[0] ^= (mask[0] & 0x1f)
+
+ pn_length = (packet[0] & 0x03) + 1
+ for i in range(pn_length):
+ packet[encrypted_offset + i] ^= mask[1 + i]
+ pn = packet[encrypted_offset:encrypted_offset + pn_length]
+ plain_header = bytes(packet[:encrypted_offset + pn_length])
+
+ # payload protection
+ nonce = bytearray(len(iv) - pn_length) + bytearray(pn)
+ for i in range(len(iv)):
+ nonce[i] ^= iv[i]
+ aesgcm = aead.AESGCM(key)
+ payload = aesgcm.decrypt(nonce, bytes(packet[encrypted_offset + pn_length:]), plain_header)
+
+ return plain_header, payload
+
===========changed ref 1===========
# module: aioquic.crypto
algorithm = hashes.SHA256()
backend = default_backend()
INITIAL_SALT = binascii.unhexlify('ef4fb0abb47470c41befcf8031334fae485e09a0')
+ MAX_PN_SIZE = 4
===========changed ref 2===========
# module: aioquic.crypto
def encrypt_packet(key, iv, hp, plain_header, plain_payload):
pn_length = (plain_header[0] & 0x03) + 1
pn_offset = len(plain_header) - pn_length
pn = plain_header[pn_offset:pn_offset + pn_length]
# payload protection
nonce = bytearray(len(iv) - pn_length) + bytearray(pn)
for i in range(len(iv)):
nonce[i] ^= iv[i]
aesgcm = aead.AESGCM(key)
protected_payload = aesgcm.encrypt(nonce, plain_payload, plain_header)
# header protection
+ sample_offset = MAX_PN_SIZE - pn_length
- sample_offset = 4 - pn_length
sample = protected_payload[sample_offset:sample_offset + 16]
cipher = Cipher(algorithms.AES(hp), modes.ECB(), backend=backend)
encryptor = cipher.encryptor()
buf = bytearray(31)
encryptor.update_into(sample, buf)
mask = buf[:5]
packet = bytearray(plain_header + protected_payload)
if is_long_header(packet[0]):
# long header
packet[0] ^= (mask[0] & 0x0f)
else:
# short header
packet[0] ^= (mask[0] & 0x1f)
for i in range(pn_length):
packet[pn_offset + i] ^= mask[1 + i]
return packet
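
The refs above give both directions of QUIC Initial packet protection: encrypt_packet applies the AEAD seal plus the AES-ECB header-protection mask, and the new decrypt_packet strips them again given the offset of the packet-number field. A minimal round-trip sketch, assuming only the aioquic.crypto helpers listed above (the payload here is arbitrary, not the draft test vector):

import binascii

from aioquic.crypto import (
    decrypt_packet, derive_keying_material, encrypt_packet)

cid = binascii.unhexlify('8394c8f03e515708')
key, iv, hp = derive_keying_material(cid, is_client=False)

plain_header = binascii.unhexlify('c1ff00001205f067a5502a4262b50040740001')
plain_payload = bytes(32)  # arbitrary payload, only for the round trip

# the packet number occupies the last pn_length bytes of the header
pn_length = (plain_header[0] & 0x03) + 1
encrypted_offset = len(plain_header) - pn_length

packet = encrypt_packet(key, iv, hp, plain_header, plain_payload)
header, payload = decrypt_packet(key, iv, hp, packet, encrypted_offset)
assert header == plain_header and payload == plain_payload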
|
aioquic.packet/QuicHeader.parse
|
Modified
|
aiortc~aioquic
|
3ec8b532ad398e0ccda2d43cd009044d01a8f263
|
[packet] check fixed bit is set both for short and long headers
|
<5>:<add> if not (first_byte & PACKET_FIXED_BIT):
<add> raise ValueError('Packet fixed bit is zero')
<add>
<add> token = b''
<27>:<del> else:
<28>:<del> raise ValueError('Long header packet type 0x%x is not supported' % packet_type)
<34>:<add> encrypted_offset=pos,
|
# module: aioquic.packet
@dataclass
class QuicHeader:
@classmethod
def parse(cls, data):
<0> datagram_length = len(data)
<1> if datagram_length < 2:
<2> raise ValueError('Packet is too short (%d bytes)' % datagram_length)
<3>
<4> first_byte = data[0]
<5> if is_long_header(first_byte):
<6> if datagram_length < 6:
<7> raise ValueError('Long header is too short (%d bytes)' % datagram_length)
<8>
<9> version, cid_lengths = unpack_from('!LB', data, 1)
<10> pos = 6
<11>
<12> destination_cid_length = decode_cid_length(cid_lengths // 16)
<13> destination_cid = data[pos:pos + destination_cid_length]
<14> pos += destination_cid_length
<15>
<16> source_cid_length = decode_cid_length(cid_lengths % 16)
<17> source_cid = data[pos:pos + source_cid_length]
<18> pos += source_cid_length
<19>
<20> packet_type = first_byte & PACKET_TYPE_MASK
<21> if packet_type == PACKET_TYPE_INITIAL:
<22> token_length, pos = unpack_variable_length(data, pos)
<23> token = data[pos:pos + token_length]
<24> pos += token_length
<25>
<26> length, pos = unpack_variable_length(data, pos)
<27> else:
<28> raise ValueError('Long header packet type 0x%x is not supported' % packet_type)
<29>
<30> return QuicHeader(
<31> version=version,
<32> destination_cid=destination_cid,
<33> source_cid=source_cid,
<34> token=token)
<35> else:
<36> # short header packet
<37> raise ValueError('Short header is not supported yet')
<38>
|
===========unchanged ref 0===========
at: _struct
unpack_from(format, /, buffer, offset=0)
at: aioquic.packet
PACKET_FIXED_BIT = 0x40
PACKET_TYPE_INITIAL = PACKET_LONG_HEADER | PACKET_FIXED_BIT | 0x00
PACKET_TYPE_MASK = 0xf0
decode_cid_length(length)
is_long_header(first_byte)
unpack_variable_length(data, pos=0)
QuicHeader(version: int, destination_cid: bytes, source_cid: bytes, encrypted_offset: int, token: bytes=b'')
at: aioquic.packet.QuicHeader
version: int
destination_cid: bytes
source_cid: bytes
encrypted_offset: int
token: bytes = b''
===========changed ref 0===========
# module: aioquic.packet
@dataclass
class QuicHeader:
version: int
destination_cid: bytes
source_cid: bytes
+ encrypted_offset: int
token: bytes = b''
|
tests.test_packet/PacketTest.test_parse_initial_client
|
Modified
|
aiortc~aioquic
|
3ec8b532ad398e0ccda2d43cd009044d01a8f263
|
[packet] check fixed bit is set both for short and long headers
|
<6>:<add> self.assertEqual(header.encrypted_offset, 17)
|
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_client(self):
<0> data = load('initial_client.bin')
<1> header = QuicHeader.parse(data)
<2> self.assertEqual(header.version, 0xff000011)
<3> self.assertEqual(header.destination_cid, binascii.unhexlify('90ed1e1c7b04b5d3'))
<4> self.assertEqual(header.source_cid, b'')
<5> self.assertEqual(header.token, b'')
<6>
|
===========unchanged ref 0===========
at: aioquic.packet
QuicHeader(version: int, destination_cid: bytes, source_cid: bytes, encrypted_offset: int, token: bytes=b'')
at: aioquic.packet.QuicHeader
version: int
destination_cid: bytes
source_cid: bytes
encrypted_offset: int
token: bytes = b''
parse(data)
at: binascii
unhexlify(hexstr: _Ascii, /) -> bytes
at: tests.utils
load(name)
at: unittest.case.TestCase
failureException: Type[BaseException]
longMessage: bool
maxDiff: Optional[int]
_testMethodName: str
_testMethodDoc: str
assertEqual(first: Any, second: Any, msg: Any=...) -> None
===========changed ref 0===========
# module: aioquic.packet
@dataclass
class QuicHeader:
version: int
destination_cid: bytes
source_cid: bytes
+ encrypted_offset: int
token: bytes = b''
===========changed ref 1===========
# module: aioquic.packet
@dataclass
class QuicHeader:
@classmethod
def parse(cls, data):
datagram_length = len(data)
if datagram_length < 2:
raise ValueError('Packet is too short (%d bytes)' % datagram_length)
first_byte = data[0]
+ if not (first_byte & PACKET_FIXED_BIT):
+ raise ValueError('Packet fixed bit is zero')
+
+ token = b''
if is_long_header(first_byte):
if datagram_length < 6:
raise ValueError('Long header is too short (%d bytes)' % datagram_length)
version, cid_lengths = unpack_from('!LB', data, 1)
pos = 6
destination_cid_length = decode_cid_length(cid_lengths // 16)
destination_cid = data[pos:pos + destination_cid_length]
pos += destination_cid_length
source_cid_length = decode_cid_length(cid_lengths % 16)
source_cid = data[pos:pos + source_cid_length]
pos += source_cid_length
packet_type = first_byte & PACKET_TYPE_MASK
if packet_type == PACKET_TYPE_INITIAL:
token_length, pos = unpack_variable_length(data, pos)
token = data[pos:pos + token_length]
pos += token_length
length, pos = unpack_variable_length(data, pos)
- else:
- raise ValueError('Long header packet type 0x%x is not supported' % packet_type)
return QuicHeader(
version=version,
destination_cid=destination_cid,
source_cid=source_cid,
+ encrypted_offset=pos,
token=token)
else:
# short header packet
raise ValueError('Short header is not supported yet')
|
tests.test_packet/PacketTest.test_parse_initial_server
|
Modified
|
aiortc~aioquic
|
3ec8b532ad398e0ccda2d43cd009044d01a8f263
|
[packet] check fixed bit is set both for short and long headers
|
<6>:<add> self.assertEqual(header.encrypted_offset, 17)
|
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_server(self):
<0> data = load('initial_server.bin')
<1> header = QuicHeader.parse(data)
<2> self.assertEqual(header.version, 0xff000011)
<3> self.assertEqual(header.destination_cid, b'')
<4> self.assertEqual(header.source_cid, binascii.unhexlify('0fcee9852fde8780'))
<5> self.assertEqual(header.token, b'')
<6>
|
===========unchanged ref 0===========
at: aioquic.packet
QuicHeader(version: int, destination_cid: bytes, source_cid: bytes, encrypted_offset: int, token: bytes=b'')
at: aioquic.packet.QuicHeader
version: int
destination_cid: bytes
parse(data)
at: binascii
unhexlify(hexstr: _Ascii, /) -> bytes
at: tests.utils
load(name)
at: unittest.case.TestCase
assertEqual(first: Any, second: Any, msg: Any=...) -> None
===========changed ref 0===========
# module: aioquic.packet
@dataclass
class QuicHeader:
version: int
destination_cid: bytes
source_cid: bytes
+ encrypted_offset: int
token: bytes = b''
===========changed ref 1===========
# module: aioquic.packet
@dataclass
class QuicHeader:
@classmethod
def parse(cls, data):
datagram_length = len(data)
if datagram_length < 2:
raise ValueError('Packet is too short (%d bytes)' % datagram_length)
first_byte = data[0]
+ if not (first_byte & PACKET_FIXED_BIT):
+ raise ValueError('Packet fixed bit is zero')
+
+ token = b''
if is_long_header(first_byte):
if datagram_length < 6:
raise ValueError('Long header is too short (%d bytes)' % datagram_length)
version, cid_lengths = unpack_from('!LB', data, 1)
pos = 6
destination_cid_length = decode_cid_length(cid_lengths // 16)
destination_cid = data[pos:pos + destination_cid_length]
pos += destination_cid_length
source_cid_length = decode_cid_length(cid_lengths % 16)
source_cid = data[pos:pos + source_cid_length]
pos += source_cid_length
packet_type = first_byte & PACKET_TYPE_MASK
if packet_type == PACKET_TYPE_INITIAL:
token_length, pos = unpack_variable_length(data, pos)
token = data[pos:pos + token_length]
pos += token_length
length, pos = unpack_variable_length(data, pos)
- else:
- raise ValueError('Long header packet type 0x%x is not supported' % packet_type)
return QuicHeader(
version=version,
destination_cid=destination_cid,
source_cid=source_cid,
+ encrypted_offset=pos,
token=token)
else:
# short header packet
raise ValueError('Short header is not supported yet')
===========changed ref 2===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_client(self):
data = load('initial_client.bin')
header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, binascii.unhexlify('90ed1e1c7b04b5d3'))
self.assertEqual(header.source_cid, b'')
self.assertEqual(header.token, b'')
+ self.assertEqual(header.encrypted_offset, 17)
|
tests.test_packet/PacketTest.test_parse_long_header_too_short
|
Modified
|
aiortc~aioquic
|
3ec8b532ad398e0ccda2d43cd009044d01a8f263
|
[packet] check fixed bit is set both for short and long headers
|
<1>:<add> QuicHeader.parse(b'\xc0\x00')
<del> QuicHeader.parse(b'\x80\x00')
|
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_long_header_too_short(self):
<0> with self.assertRaises(ValueError) as cm:
<1> QuicHeader.parse(b'\x80\x00')
<2> self.assertEqual(str(cm.exception), 'Long header is too short (2 bytes)')
<3>
|
===========unchanged ref 0===========
at: aioquic.packet
QuicHeader(version: int, destination_cid: bytes, source_cid: bytes, encrypted_offset: int, token: bytes=b'')
at: aioquic.packet.QuicHeader
parse(data)
at: unittest.case.TestCase
assertEqual(first: Any, second: Any, msg: Any=...) -> None
assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E]
assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None
at: unittest.case._AssertRaisesContext.__exit__
self.exception = exc_value.with_traceback(None)
===========changed ref 0===========
# module: aioquic.packet
@dataclass
class QuicHeader:
version: int
destination_cid: bytes
source_cid: bytes
+ encrypted_offset: int
token: bytes = b''
===========changed ref 1===========
# module: aioquic.packet
@dataclass
class QuicHeader:
@classmethod
def parse(cls, data):
datagram_length = len(data)
if datagram_length < 2:
raise ValueError('Packet is too short (%d bytes)' % datagram_length)
first_byte = data[0]
+ if not (first_byte & PACKET_FIXED_BIT):
+ raise ValueError('Packet fixed bit is zero')
+
+ token = b''
if is_long_header(first_byte):
if datagram_length < 6:
raise ValueError('Long header is too short (%d bytes)' % datagram_length)
version, cid_lengths = unpack_from('!LB', data, 1)
pos = 6
destination_cid_length = decode_cid_length(cid_lengths // 16)
destination_cid = data[pos:pos + destination_cid_length]
pos += destination_cid_length
source_cid_length = decode_cid_length(cid_lengths % 16)
source_cid = data[pos:pos + source_cid_length]
pos += source_cid_length
packet_type = first_byte & PACKET_TYPE_MASK
if packet_type == PACKET_TYPE_INITIAL:
token_length, pos = unpack_variable_length(data, pos)
token = data[pos:pos + token_length]
pos += token_length
length, pos = unpack_variable_length(data, pos)
- else:
- raise ValueError('Long header packet type 0x%x is not supported' % packet_type)
return QuicHeader(
version=version,
destination_cid=destination_cid,
source_cid=source_cid,
+ encrypted_offset=pos,
token=token)
else:
# short header packet
raise ValueError('Short header is not supported yet')
===========changed ref 2===========
# module: tests.test_packet
class PacketTest(TestCase):
+ def test_parse_long_header_no_fixed_bit(self):
+ with self.assertRaises(ValueError) as cm:
+ QuicHeader.parse(b'\x80\x00\x00\x00\x00\x00')
+ self.assertEqual(str(cm.exception), 'Packet fixed bit is zero')
+
===========changed ref 3===========
# module: tests.test_packet
class PacketTest(TestCase):
- def test_parse_long_header_bad_packet_type(self):
- with self.assertRaises(ValueError) as cm:
- QuicHeader.parse(b'\x80\x00\x00\x00\x00\x00')
- self.assertEqual(str(cm.exception), 'Long header packet type 0x80 is not supported')
-
===========changed ref 4===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_server(self):
data = load('initial_server.bin')
header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, b'')
self.assertEqual(header.source_cid, binascii.unhexlify('0fcee9852fde8780'))
self.assertEqual(header.token, b'')
+ self.assertEqual(header.encrypted_offset, 17)
===========changed ref 5===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_client(self):
data = load('initial_client.bin')
header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, binascii.unhexlify('90ed1e1c7b04b5d3'))
self.assertEqual(header.source_cid, b'')
self.assertEqual(header.token, b'')
+ self.assertEqual(header.encrypted_offset, 17)
|
tests.test_packet/PacketTest.test_parse_short_header
|
Modified
|
aiortc~aioquic
|
3ec8b532ad398e0ccda2d43cd009044d01a8f263
|
[packet] check fixed bit is set both for short and long headers
|
<1>:<add> QuicHeader.parse(b'\x40\x00')
<del> QuicHeader.parse(b'\x00\x00')
|
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_short_header(self):
<0> with self.assertRaises(ValueError) as cm:
<1> QuicHeader.parse(b'\x00\x00')
<2> self.assertEqual(str(cm.exception), 'Short header is not supported yet')
<3>
|
===========unchanged ref 0===========
at: aioquic.packet
QuicHeader(version: int, destination_cid: bytes, source_cid: bytes, encrypted_offset: int, token: bytes=b'')
at: aioquic.packet.QuicHeader
parse(data)
at: unittest.case.TestCase
assertEqual(first: Any, second: Any, msg: Any=...) -> None
assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E]
assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None
at: unittest.case._AssertRaisesContext.__exit__
self.exception = exc_value.with_traceback(None)
===========changed ref 0===========
# module: aioquic.packet
@dataclass
class QuicHeader:
version: int
destination_cid: bytes
source_cid: bytes
+ encrypted_offset: int
token: bytes = b''
===========changed ref 1===========
# module: aioquic.packet
@dataclass
class QuicHeader:
@classmethod
def parse(cls, data):
datagram_length = len(data)
if datagram_length < 2:
raise ValueError('Packet is too short (%d bytes)' % datagram_length)
first_byte = data[0]
+ if not (first_byte & PACKET_FIXED_BIT):
+ raise ValueError('Packet fixed bit is zero')
+
+ token = b''
if is_long_header(first_byte):
if datagram_length < 6:
raise ValueError('Long header is too short (%d bytes)' % datagram_length)
version, cid_lengths = unpack_from('!LB', data, 1)
pos = 6
destination_cid_length = decode_cid_length(cid_lengths // 16)
destination_cid = data[pos:pos + destination_cid_length]
pos += destination_cid_length
source_cid_length = decode_cid_length(cid_lengths % 16)
source_cid = data[pos:pos + source_cid_length]
pos += source_cid_length
packet_type = first_byte & PACKET_TYPE_MASK
if packet_type == PACKET_TYPE_INITIAL:
token_length, pos = unpack_variable_length(data, pos)
token = data[pos:pos + token_length]
pos += token_length
length, pos = unpack_variable_length(data, pos)
- else:
- raise ValueError('Long header packet type 0x%x is not supported' % packet_type)
return QuicHeader(
version=version,
destination_cid=destination_cid,
source_cid=source_cid,
+ encrypted_offset=pos,
token=token)
else:
# short header packet
raise ValueError('Short header is not supported yet')
===========changed ref 2===========
# module: tests.test_packet
class PacketTest(TestCase):
+ def test_parse_long_header_no_fixed_bit(self):
+ with self.assertRaises(ValueError) as cm:
+ QuicHeader.parse(b'\x80\x00\x00\x00\x00\x00')
+ self.assertEqual(str(cm.exception), 'Packet fixed bit is zero')
+
===========changed ref 3===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_long_header_too_short(self):
with self.assertRaises(ValueError) as cm:
+ QuicHeader.parse(b'\xc0\x00')
- QuicHeader.parse(b'\x80\x00')
self.assertEqual(str(cm.exception), 'Long header is too short (2 bytes)')
===========changed ref 4===========
# module: tests.test_packet
class PacketTest(TestCase):
- def test_parse_long_header_bad_packet_type(self):
- with self.assertRaises(ValueError) as cm:
- QuicHeader.parse(b'\x80\x00\x00\x00\x00\x00')
- self.assertEqual(str(cm.exception), 'Long header packet type 0x80 is not supported')
-
===========changed ref 5===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_server(self):
data = load('initial_server.bin')
header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, b'')
self.assertEqual(header.source_cid, binascii.unhexlify('0fcee9852fde8780'))
self.assertEqual(header.token, b'')
+ self.assertEqual(header.encrypted_offset, 17)
===========changed ref 6===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_client(self):
data = load('initial_client.bin')
header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, binascii.unhexlify('90ed1e1c7b04b5d3'))
self.assertEqual(header.source_cid, b'')
self.assertEqual(header.token, b'')
+ self.assertEqual(header.encrypted_offset, 17)
|
tests.test_packet/PacketTest.test_parse_initial_client
|
Modified
|
aiortc~aioquic
|
8dd30bd437180a690be542a991317b89ccb70659
|
[tls] parse and serialize ClientHello and ServerHello
|
<0>:<add> buf = Buffer(data=load('initial_client.bin'))
<del> data = load('initial_client.bin')
<1>:<add> header = pull_quic_header(buf)
<del> header = QuicHeader.parse(data)
|
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_client(self):
<0> data = load('initial_client.bin')
<1> header = QuicHeader.parse(data)
<2> self.assertEqual(header.version, 0xff000011)
<3> self.assertEqual(header.destination_cid, binascii.unhexlify('90ed1e1c7b04b5d3'))
<4> self.assertEqual(header.source_cid, b'')
<5> self.assertEqual(header.token, b'')
<6> self.assertEqual(header.encrypted_offset, 17)
<7>
|
===========unchanged ref 0===========
at: aioquic.packet
pull_quic_header(buf)
at: aioquic.tls
Buffer(capacity=None, data=None)
at: unittest.case.TestCase
failureException: Type[BaseException]
longMessage: bool
maxDiff: Optional[int]
_testMethodName: str
_testMethodDoc: str
assertEqual(first: Any, second: Any, msg: Any=...) -> None
assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E]
assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None
at: unittest.case._AssertRaisesContext.__exit__
self.exception = exc_value.with_traceback(None)
===========changed ref 0===========
# module: aioquic.packet
+ def pull_quic_header(buf):
+ first_byte = pull_uint8(buf)
+ if not (first_byte & PACKET_FIXED_BIT):
+ raise ValueError('Packet fixed bit is zero')
+
+ token = b''
+ if is_long_header(first_byte):
+ version = pull_uint32(buf)
+ cid_lengths = pull_uint8(buf)
+
+ destination_cid_length = decode_cid_length(cid_lengths // 16)
+ destination_cid = pull_bytes(buf, destination_cid_length)
+
+ source_cid_length = decode_cid_length(cid_lengths % 16)
+ source_cid = pull_bytes(buf, source_cid_length)
+
+ packet_type = first_byte & PACKET_TYPE_MASK
+ if packet_type == PACKET_TYPE_INITIAL:
+ token_length = pull_uint_var(buf)
+ token = pull_bytes(buf, token_length)
+ pull_uint_var(buf)
+
+ return QuicHeader(
+ version=version,
+ destination_cid=destination_cid,
+ source_cid=source_cid,
+ encrypted_offset=buf._pos,
+ token=token)
+ else:
+ # short header packet
+ raise ValueError('Short header is not supported yet')
+
===========changed ref 1===========
# module: tests.test_packet
class PacketTest(TestCase):
+ def test_parse_empty(self):
+ buf = Buffer(data=b'')
+ with self.assertRaises(BufferReadError):
+ pull_quic_header(buf)
+
===========changed ref 2===========
# module: tests.test_packet
- class UtilTest(TestCase):
- def test_unpack_variable_length(self):
- # 1 byte
- self.assertEqual(unpack_variable_length(b'\x00'), (0, 1))
- self.assertEqual(unpack_variable_length(b'\x01'), (1, 1))
- self.assertEqual(unpack_variable_length(b'\x25'), (37, 1))
- self.assertEqual(unpack_variable_length(b'\x3f'), (63, 1))
-
- # 2 bytes
- self.assertEqual(unpack_variable_length(b'\x7b\xbd'), (15293, 2))
- self.assertEqual(unpack_variable_length(b'\x7f\xff'), (16383, 2))
-
- # 4 bytes
- self.assertEqual(unpack_variable_length(b'\x9d\x7f\x3e\x7d'), (494878333, 4))
- self.assertEqual(unpack_variable_length(b'\xbf\xff\xff\xff'), (1073741823, 4))
-
- # 8 bytes
- self.assertEqual(unpack_variable_length(b'\xc2\x19\x7c\x5e\xff\x14\xe8\x8c'),
- (151288809941952652, 8))
- self.assertEqual(unpack_variable_length(b'\xff\xff\xff\xff\xff\xff\xff\xff'),
- (4611686018427387903, 8))
-
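
The deleted test above documents the QUIC variable-length integer encoding that pull_uint_var now handles: the two high bits of the first byte select a 1-, 2-, 4- or 8-byte encoding and the remaining bits hold the big-endian value. A standalone sketch of that decoding (not the project's pull_uint_var, whose buffer-based signature differs), checked against the vectors above:

def decode_var_int(data, pos=0):
    # the two high bits give the encoded length: 1, 2, 4 or 8 bytes
    first = data[pos]
    length = 1 << (first >> 6)
    value = first & 0x3f
    for b in data[pos + 1:pos + length]:
        value = (value << 8) | b
    return value, pos + length

assert decode_var_int(b'\x25') == (37, 1)
assert decode_var_int(b'\x7b\xbd') == (15293, 2)
assert decode_var_int(b'\x9d\x7f\x3e\x7d') == (494878333, 4)
assert decode_var_int(b'\xc2\x19\x7c\x5e\xff\x14\xe8\x8c') == (151288809941952652, 8)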
===========changed ref 3===========
+ # module: aioquic.tls
+
+
===========changed ref 4===========
# module: aioquic.packet
-
-
===========changed ref 5===========
+ # module: aioquic.tls
+ class BufferReadError(ValueError):
+ pass
+
===========changed ref 6===========
+ # module: aioquic.tls
+ class Buffer:
+ @property
+ def data(self):
+ return bytes(self._data[:self._pos])
+
===========changed ref 7===========
+ # module: aioquic.tls
+ @contextmanager
+ def push_extension(buf, extension_type):
+ push_uint16(buf, extension_type)
+ with push_block(buf, 2):
+ yield
+
===========changed ref 8===========
+ # module: aioquic.tls
+ def push_tlv32(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 4)
+ push_uint32(buf, value)
+
===========changed ref 9===========
+ # module: aioquic.tls
+ def push_tlv16(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 2)
+ push_uint16(buf, value)
+
===========changed ref 10===========
+ # module: aioquic.tls
+ def push_tlv8(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 1)
+ push_uint8(buf, value)
+
===========changed ref 11===========
+ # module: aioquic.tls
+ def push_uint8(buf, v):
+ """
+ Push an 8-bit unsigned integer.
+ """
+ buf._data[buf._pos] = v
+ buf._pos += 1
+
===========changed ref 12===========
+ # module: aioquic.tls
+ def push_key_share(buf, value):
+ push_uint16(buf, value[0])
+ with push_block(buf, 2):
+ push_bytes(buf, value[1])
+
===========changed ref 13===========
+ # module: aioquic.tls
+ def push_list(buf, capacity, func, values):
+ """
+ Push a list of items.
+ """
+ with push_block(buf, capacity):
+ for value in values:
+ func(buf, value)
+
===========changed ref 14===========
+ # module: aioquic.tls
+ def push_uint64(buf, v):
+ """
+ Push a 64-bit unsigned integer.
+ """
+ pack_into('!Q', buf._data, buf._pos, v)
+ buf._pos += 8
+
===========changed ref 15===========
+ # module: aioquic.tls
+ def push_uint32(buf, v):
+ """
+ Push a 32-bit unsigned integer.
+ """
+ pack_into('!L', buf._data, buf._pos, v)
+ buf._pos += 4
+
===========changed ref 16===========
+ # module: aioquic.tls
+ def push_uint16(buf, v):
+ """
+ Push a 16-bit unsigned integer.
+ """
+ pack_into('!H', buf._data, buf._pos, v)
+ buf._pos += 2
+
===========changed ref 17===========
+ # module: aioquic.tls
+ def push_bytes(buf, v):
+ """
+ Push bytes.
+ """
+ length = len(v)
+ buf._data[buf._pos:buf._pos + length] = v
+ buf._pos += length
+
===========changed ref 18===========
+ # module: aioquic.tls
+ # KeyShareEntry
+
+
+ def pull_key_share(buf):
+ group = pull_uint16(buf)
+ data_length = pull_uint16(buf)
+ data = pull_bytes(buf, data_length)
+ return (group, data)
+
|
tests.test_packet/PacketTest.test_parse_initial_server
|
Modified
|
aiortc~aioquic
|
8dd30bd437180a690be542a991317b89ccb70659
|
[tls] parse and serialize ClientHello and ServerHello
|
<0>:<add> buf = Buffer(data=load('initial_server.bin'))
<del> data = load('initial_server.bin')
<1>:<add> header = pull_quic_header(buf)
<del> header = QuicHeader.parse(data)
|
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_server(self):
<0> data = load('initial_server.bin')
<1> header = QuicHeader.parse(data)
<2> self.assertEqual(header.version, 0xff000011)
<3> self.assertEqual(header.destination_cid, b'')
<4> self.assertEqual(header.source_cid, binascii.unhexlify('0fcee9852fde8780'))
<5> self.assertEqual(header.token, b'')
<6> self.assertEqual(header.encrypted_offset, 17)
<7>
|
===========unchanged ref 0===========
at: aioquic.packet
pull_quic_header(buf)
at: aioquic.tls
BufferReadError(*args: object)
Buffer(capacity=None, data=None)
at: tests.test_packet.PacketTest.test_parse_long_header_too_short
buf = Buffer(data=b'\xc0\x00')
at: unittest.case.TestCase
assertEqual(first: Any, second: Any, msg: Any=...) -> None
assertRaises(expected_exception: Union[Type[_E], Tuple[Type[_E], ...]], msg: Any=...) -> _AssertRaisesContext[_E]
assertRaises(expected_exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]], callable: Callable[..., Any], *args: Any, **kwargs: Any) -> None
at: unittest.case._AssertRaisesContext.__exit__
self.exception = exc_value.with_traceback(None)
===========changed ref 0===========
+ # module: aioquic.tls
+ class BufferReadError(ValueError):
+ pass
+
===========changed ref 1===========
# module: aioquic.packet
+ def pull_quic_header(buf):
+ first_byte = pull_uint8(buf)
+ if not (first_byte & PACKET_FIXED_BIT):
+ raise ValueError('Packet fixed bit is zero')
+
+ token = b''
+ if is_long_header(first_byte):
+ version = pull_uint32(buf)
+ cid_lengths = pull_uint8(buf)
+
+ destination_cid_length = decode_cid_length(cid_lengths // 16)
+ destination_cid = pull_bytes(buf, destination_cid_length)
+
+ source_cid_length = decode_cid_length(cid_lengths % 16)
+ source_cid = pull_bytes(buf, source_cid_length)
+
+ packet_type = first_byte & PACKET_TYPE_MASK
+ if packet_type == PACKET_TYPE_INITIAL:
+ token_length = pull_uint_var(buf)
+ token = pull_bytes(buf, token_length)
+ pull_uint_var(buf)
+
+ return QuicHeader(
+ version=version,
+ destination_cid=destination_cid,
+ source_cid=source_cid,
+ encrypted_offset=buf._pos,
+ token=token)
+ else:
+ # short header packet
+ raise ValueError('Short header is not supported yet')
+
===========changed ref 2===========
# module: tests.test_packet
class PacketTest(TestCase):
+ def test_parse_empty(self):
+ buf = Buffer(data=b'')
+ with self.assertRaises(BufferReadError):
+ pull_quic_header(buf)
+
===========changed ref 3===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_client(self):
+ buf = Buffer(data=load('initial_client.bin'))
- data = load('initial_client.bin')
+ header = pull_quic_header(buf)
- header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, binascii.unhexlify('90ed1e1c7b04b5d3'))
self.assertEqual(header.source_cid, b'')
self.assertEqual(header.token, b'')
self.assertEqual(header.encrypted_offset, 17)
===========changed ref 4===========
# module: tests.test_packet
- class UtilTest(TestCase):
- def test_unpack_variable_length(self):
- # 1 byte
- self.assertEqual(unpack_variable_length(b'\x00'), (0, 1))
- self.assertEqual(unpack_variable_length(b'\x01'), (1, 1))
- self.assertEqual(unpack_variable_length(b'\x25'), (37, 1))
- self.assertEqual(unpack_variable_length(b'\x3f'), (63, 1))
-
- # 2 bytes
- self.assertEqual(unpack_variable_length(b'\x7b\xbd'), (15293, 2))
- self.assertEqual(unpack_variable_length(b'\x7f\xff'), (16383, 2))
-
- # 4 bytes
- self.assertEqual(unpack_variable_length(b'\x9d\x7f\x3e\x7d'), (494878333, 4))
- self.assertEqual(unpack_variable_length(b'\xbf\xff\xff\xff'), (1073741823, 4))
-
- # 8 bytes
- self.assertEqual(unpack_variable_length(b'\xc2\x19\x7c\x5e\xff\x14\xe8\x8c'),
- (151288809941952652, 8))
- self.assertEqual(unpack_variable_length(b'\xff\xff\xff\xff\xff\xff\xff\xff'),
- (4611686018427387903, 8))
-
===========changed ref 5===========
+ # module: aioquic.tls
+
+
===========changed ref 6===========
# module: aioquic.packet
-
-
===========changed ref 7===========
+ # module: aioquic.tls
+ class Buffer:
+ @property
+ def data(self):
+ return bytes(self._data[:self._pos])
+
===========changed ref 8===========
+ # module: aioquic.tls
+ @contextmanager
+ def push_extension(buf, extension_type):
+ push_uint16(buf, extension_type)
+ with push_block(buf, 2):
+ yield
+
===========changed ref 9===========
+ # module: aioquic.tls
+ def push_tlv32(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 4)
+ push_uint32(buf, value)
+
===========changed ref 10===========
+ # module: aioquic.tls
+ def push_tlv16(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 2)
+ push_uint16(buf, value)
+
===========changed ref 11===========
+ # module: aioquic.tls
+ def push_tlv8(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 1)
+ push_uint8(buf, value)
+
===========changed ref 12===========
+ # module: aioquic.tls
+ def push_uint8(buf, v):
+ """
+ Push an 8-bit unsigned integer.
+ """
+ buf._data[buf._pos] = v
+ buf._pos += 1
+
===========changed ref 13===========
+ # module: aioquic.tls
+ def push_key_share(buf, value):
+ push_uint16(buf, value[0])
+ with push_block(buf, 2):
+ push_bytes(buf, value[1])
+
===========changed ref 14===========
+ # module: aioquic.tls
+ def push_list(buf, capacity, func, values):
+ """
+ Push a list of items.
+ """
+ with push_block(buf, capacity):
+ for value in values:
+ func(buf, value)
+
===========changed ref 15===========
+ # module: aioquic.tls
+ def push_uint64(buf, v):
+ """
+ Push a 64-bit unsigned integer.
+ """
+ pack_into('!Q', buf._data, buf._pos, v)
+ buf._pos += 8
+
===========changed ref 16===========
+ # module: aioquic.tls
+ def push_uint32(buf, v):
+ """
+ Push a 32-bit unsigned integer.
+ """
+ pack_into('!L', buf._data, buf._pos, v)
+ buf._pos += 4
+
===========changed ref 17===========
+ # module: aioquic.tls
+ def push_uint16(buf, v):
+ """
+ Push a 16-bit unsigned integer.
+ """
+ pack_into('!H', buf._data, buf._pos, v)
+ buf._pos += 2
+
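
The push_tlv* helpers above each write a 16-bit parameter id, a 16-bit length and the value itself, so their output is fully determined by the struct packs shown in the refs. A quick check of that layout, assuming Buffer and push_tlv16 behave exactly as listed:

from aioquic.tls import Buffer, push_tlv16

buf = Buffer(capacity=8)
push_tlv16(buf, 0x0005, 0x1000)
# 2-byte id, 2-byte length (always 2), 2-byte value
assert buf.data == b'\x00\x05\x00\x02\x10\x00'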
|
tests.test_packet/PacketTest.test_parse_long_header_no_fixed_bit
|
Modified
|
aiortc~aioquic
|
8dd30bd437180a690be542a991317b89ccb70659
|
[tls] parse and serialize ClientHello and ServerHello
|
<0>:<add> buf = Buffer(data=b'\x80\x00\x00\x00\x00\x00')
<1>:<add> pull_quic_header(buf)
<del> QuicHeader.parse(b'\x80\x00\x00\x00\x00\x00')
|
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_long_header_no_fixed_bit(self):
<0> with self.assertRaises(ValueError) as cm:
<1> QuicHeader.parse(b'\x80\x00\x00\x00\x00\x00')
<2> self.assertEqual(str(cm.exception), 'Packet fixed bit is zero')
<3>
|
===========changed ref 0===========
# module: tests.test_packet
class PacketTest(TestCase):
+ def test_parse_empty(self):
+ buf = Buffer(data=b'')
+ with self.assertRaises(BufferReadError):
+ pull_quic_header(buf)
+
===========changed ref 1===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_server(self):
+ buf = Buffer(data=load('initial_server.bin'))
- data = load('initial_server.bin')
+ header = pull_quic_header(buf)
- header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, b'')
self.assertEqual(header.source_cid, binascii.unhexlify('0fcee9852fde8780'))
self.assertEqual(header.token, b'')
self.assertEqual(header.encrypted_offset, 17)
===========changed ref 2===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_client(self):
+ buf = Buffer(data=load('initial_client.bin'))
- data = load('initial_client.bin')
+ header = pull_quic_header(buf)
- header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, binascii.unhexlify('90ed1e1c7b04b5d3'))
self.assertEqual(header.source_cid, b'')
self.assertEqual(header.token, b'')
self.assertEqual(header.encrypted_offset, 17)
===========changed ref 3===========
# module: tests.test_packet
- class UtilTest(TestCase):
- def test_unpack_variable_length(self):
- # 1 byte
- self.assertEqual(unpack_variable_length(b'\x00'), (0, 1))
- self.assertEqual(unpack_variable_length(b'\x01'), (1, 1))
- self.assertEqual(unpack_variable_length(b'\x25'), (37, 1))
- self.assertEqual(unpack_variable_length(b'\x3f'), (63, 1))
-
- # 2 bytes
- self.assertEqual(unpack_variable_length(b'\x7b\xbd'), (15293, 2))
- self.assertEqual(unpack_variable_length(b'\x7f\xff'), (16383, 2))
-
- # 4 bytes
- self.assertEqual(unpack_variable_length(b'\x9d\x7f\x3e\x7d'), (494878333, 4))
- self.assertEqual(unpack_variable_length(b'\xbf\xff\xff\xff'), (1073741823, 4))
-
- # 8 bytes
- self.assertEqual(unpack_variable_length(b'\xc2\x19\x7c\x5e\xff\x14\xe8\x8c'),
- (151288809941952652, 8))
- self.assertEqual(unpack_variable_length(b'\xff\xff\xff\xff\xff\xff\xff\xff'),
- (4611686018427387903, 8))
-
===========changed ref 4===========
+ # module: aioquic.tls
+
+
===========changed ref 5===========
# module: aioquic.packet
-
-
===========changed ref 6===========
+ # module: aioquic.tls
+ class BufferReadError(ValueError):
+ pass
+
===========changed ref 7===========
+ # module: aioquic.tls
+ class Buffer:
+ @property
+ def data(self):
+ return bytes(self._data[:self._pos])
+
===========changed ref 8===========
+ # module: aioquic.tls
+ @contextmanager
+ def push_extension(buf, extension_type):
+ push_uint16(buf, extension_type)
+ with push_block(buf, 2):
+ yield
+
===========changed ref 9===========
+ # module: aioquic.tls
+ def push_tlv32(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 4)
+ push_uint32(buf, value)
+
===========changed ref 10===========
+ # module: aioquic.tls
+ def push_tlv16(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 2)
+ push_uint16(buf, value)
+
===========changed ref 11===========
+ # module: aioquic.tls
+ def push_tlv8(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 1)
+ push_uint8(buf, value)
+
===========changed ref 12===========
+ # module: aioquic.tls
+ def push_uint8(buf, v):
+ """
+ Push an 8-bit unsigned integer.
+ """
+ buf._data[buf._pos] = v
+ buf._pos += 1
+
===========changed ref 13===========
+ # module: aioquic.tls
+ def push_key_share(buf, value):
+ push_uint16(buf, value[0])
+ with push_block(buf, 2):
+ push_bytes(buf, value[1])
+
===========changed ref 14===========
+ # module: aioquic.tls
+ def push_list(buf, capacity, func, values):
+ """
+ Push a list of items.
+ """
+ with push_block(buf, capacity):
+ for value in values:
+ func(buf, value)
+
===========changed ref 15===========
+ # module: aioquic.tls
+ def push_uint64(buf, v):
+ """
+ Push a 64-bit unsigned integer.
+ """
+ pack_into('!Q', buf._data, buf._pos, v)
+ buf._pos += 8
+
===========changed ref 16===========
+ # module: aioquic.tls
+ def push_uint32(buf, v):
+ """
+ Push a 32-bit unsigned integer.
+ """
+ pack_into('!L', buf._data, buf._pos, v)
+ buf._pos += 4
+
===========changed ref 17===========
+ # module: aioquic.tls
+ def push_uint16(buf, v):
+ """
+ Push a 16-bit unsigned integer.
+ """
+ pack_into('!H', buf._data, buf._pos, v)
+ buf._pos += 2
+
===========changed ref 18===========
+ # module: aioquic.tls
+ def push_bytes(buf, v):
+ """
+ Push bytes.
+ """
+ length = len(v)
+ buf._data[buf._pos:buf._pos + length] = v
+ buf._pos += length
+
===========changed ref 19===========
+ # module: aioquic.tls
+ # KeyShareEntry
+
+
+ def pull_key_share(buf):
+ group = pull_uint16(buf)
+ data_length = pull_uint16(buf)
+ data = pull_bytes(buf, data_length)
+ return (group, data)
+
===========changed ref 20===========
+ # module: aioquic.tls
+ # BLOCKS
+
+
+ @contextmanager
+ def pull_block(buf, capacity):
+ length = 0
+ for b in pull_bytes(buf, capacity):
+ length = (length << 8) | b
+ end = buf._pos + length
+ yield end
+ assert buf._pos == end
+
===========changed ref 21===========
# module: aioquic.packet
- def unpack_variable_length(data, pos=0):
- kind = data[pos] // 64
- length, fmt, mask = VARIABLE_LENGTH_FORMATS[kind]
- return unpack_from(fmt, data, pos)[0] & mask, pos + length
-
===========changed ref 22===========
+ # module: aioquic.tls
+ class Buffer:
+ def __init__(self, capacity=None, data=None):
+ if data is not None:
+ self._data = data
+ self._length = len(data)
+ else:
+ self._data = bytearray(capacity)
+ self._length = capacity
+ self._pos = 0
+
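
pull_block reads a length prefix of the given size and yields the position where the block must end; push_block (used by push_extension above, its body not shown here) presumably reserves the prefix and backfills it on exit. A round-trip sketch under that assumption:

from aioquic.tls import (
    Buffer, pull_block, pull_uint16, push_block, push_uint16)

buf = Buffer(capacity=16)
with push_block(buf, 2):       # 2-byte length prefix, assumed backfilled on exit
    push_uint16(buf, 0x001d)
wire = buf.data                # expected to be b'\x00\x02\x00\x1d'

rd = Buffer(data=wire)
with pull_block(rd, 2):
    assert pull_uint16(rd) == 0x001d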
|
tests.test_packet/PacketTest.test_parse_long_header_too_short
|
Modified
|
aiortc~aioquic
|
8dd30bd437180a690be542a991317b89ccb70659
|
[tls] parse and serialize ClientHello and ServerHello
|
<0>:<add> buf = Buffer(data=b'\xc0\x00')
<add> with self.assertRaises(BufferReadError):
<del> with self.assertRaises(ValueError) as cm:
<1>:<del> QuicHeader.parse(b'\xc0\x00')
<2>:<del> self.assertEqual(str(cm.exception), 'Long header is too short (2 bytes)')
<3>:<add> pull_quic_header(buf)
|
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_long_header_too_short(self):
<0> with self.assertRaises(ValueError) as cm:
<1> QuicHeader.parse(b'\xc0\x00')
<2> self.assertEqual(str(cm.exception), 'Long header is too short (2 bytes)')
<3>
|
===========changed ref 0===========
# module: tests.test_packet
class PacketTest(TestCase):
+ def test_parse_empty(self):
+ buf = Buffer(data=b'')
+ with self.assertRaises(BufferReadError):
+ pull_quic_header(buf)
+
===========changed ref 1===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_long_header_no_fixed_bit(self):
+ buf = Buffer(data=b'\x80\x00\x00\x00\x00\x00')
with self.assertRaises(ValueError) as cm:
+ pull_quic_header(buf)
- QuicHeader.parse(b'\x80\x00\x00\x00\x00\x00')
self.assertEqual(str(cm.exception), 'Packet fixed bit is zero')
===========changed ref 2===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_server(self):
+ buf = Buffer(data=load('initial_server.bin'))
- data = load('initial_server.bin')
+ header = pull_quic_header(buf)
- header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, b'')
self.assertEqual(header.source_cid, binascii.unhexlify('0fcee9852fde8780'))
self.assertEqual(header.token, b'')
self.assertEqual(header.encrypted_offset, 17)
===========changed ref 3===========
# module: tests.test_packet
class PacketTest(TestCase):
def test_parse_initial_client(self):
+ buf = Buffer(data=load('initial_client.bin'))
- data = load('initial_client.bin')
+ header = pull_quic_header(buf)
- header = QuicHeader.parse(data)
self.assertEqual(header.version, 0xff000011)
self.assertEqual(header.destination_cid, binascii.unhexlify('90ed1e1c7b04b5d3'))
self.assertEqual(header.source_cid, b'')
self.assertEqual(header.token, b'')
self.assertEqual(header.encrypted_offset, 17)
===========changed ref 4===========
# module: tests.test_packet
- class UtilTest(TestCase):
- def test_unpack_variable_length(self):
- # 1 byte
- self.assertEqual(unpack_variable_length(b'\x00'), (0, 1))
- self.assertEqual(unpack_variable_length(b'\x01'), (1, 1))
- self.assertEqual(unpack_variable_length(b'\x25'), (37, 1))
- self.assertEqual(unpack_variable_length(b'\x3f'), (63, 1))
-
- # 2 bytes
- self.assertEqual(unpack_variable_length(b'\x7b\xbd'), (15293, 2))
- self.assertEqual(unpack_variable_length(b'\x7f\xff'), (16383, 2))
-
- # 4 bytes
- self.assertEqual(unpack_variable_length(b'\x9d\x7f\x3e\x7d'), (494878333, 4))
- self.assertEqual(unpack_variable_length(b'\xbf\xff\xff\xff'), (1073741823, 4))
-
- # 8 bytes
- self.assertEqual(unpack_variable_length(b'\xc2\x19\x7c\x5e\xff\x14\xe8\x8c'),
- (151288809941952652, 8))
- self.assertEqual(unpack_variable_length(b'\xff\xff\xff\xff\xff\xff\xff\xff'),
- (4611686018427387903, 8))
-
===========changed ref 5===========
+ # module: aioquic.tls
+
+
===========changed ref 6===========
# module: aioquic.packet
-
-
===========changed ref 7===========
+ # module: aioquic.tls
+ class BufferReadError(ValueError):
+ pass
+
===========changed ref 8===========
+ # module: aioquic.tls
+ class Buffer:
+ @property
+ def data(self):
+ return bytes(self._data[:self._pos])
+
===========changed ref 9===========
+ # module: aioquic.tls
+ @contextmanager
+ def push_extension(buf, extension_type):
+ push_uint16(buf, extension_type)
+ with push_block(buf, 2):
+ yield
+
===========changed ref 10===========
+ # module: aioquic.tls
+ def push_tlv32(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 4)
+ push_uint32(buf, value)
+
===========changed ref 11===========
+ # module: aioquic.tls
+ def push_tlv16(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 2)
+ push_uint16(buf, value)
+
===========changed ref 12===========
+ # module: aioquic.tls
+ def push_tlv8(buf, param, value):
+ push_uint16(buf, param)
+ push_uint16(buf, 1)
+ push_uint8(buf, value)
+
===========changed ref 13===========
+ # module: aioquic.tls
+ def push_uint8(buf, v):
+ """
+ Push an 8-bit unsigned integer.
+ """
+ buf._data[buf._pos] = v
+ buf._pos += 1
+
===========changed ref 14===========
+ # module: aioquic.tls
+ def push_key_share(buf, value):
+ push_uint16(buf, value[0])
+ with push_block(buf, 2):
+ push_bytes(buf, value[1])
+
===========changed ref 15===========
+ # module: aioquic.tls
+ def push_list(buf, capacity, func, values):
+ """
+ Push a list of items.
+ """
+ with push_block(buf, capacity):
+ for value in values:
+ func(buf, value)
+
===========changed ref 16===========
+ # module: aioquic.tls
+ def push_uint64(buf, v):
+ """
+ Push a 64-bit unsigned integer.
+ """
+ pack_into('!Q', buf._data, buf._pos, v)
+ buf._pos += 8
+
===========changed ref 17===========
+ # module: aioquic.tls
+ def push_uint32(buf, v):
+ """
+ Push a 32-bit unsigned integer.
+ """
+ pack_into('!L', buf._data, buf._pos, v)
+ buf._pos += 4
+
===========changed ref 18===========
+ # module: aioquic.tls
+ def push_uint16(buf, v):
+ """
+ Push a 16-bit unsigned integer.
+ """
+ pack_into('!H', buf._data, buf._pos, v)
+ buf._pos += 2
+
===========changed ref 19===========
+ # module: aioquic.tls
+ def push_bytes(buf, v):
+ """
+ Push bytes.
+ """
+ length = len(v)
+ buf._data[buf._pos:buf._pos + length] = v
+ buf._pos += length
+
===========changed ref 20===========
+ # module: aioquic.tls
+ # KeyShareEntry
+
+
+ def pull_key_share(buf):
+ group = pull_uint16(buf)
+ data_length = pull_uint16(buf)
+ data = pull_bytes(buf, data_length)
+ return (group, data)
+
===========changed ref 21===========
+ # module: aioquic.tls
+ # BLOCKS
+
+
+ @contextmanager
+ def pull_block(buf, capacity):
+ length = 0
+ for b in pull_bytes(buf, capacity):
+ length = (length << 8) | b
+ end = buf._pos + length
+ yield end
+ assert buf._pos == end
+
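
Putting the new API together: a caller wraps the incoming datagram in a Buffer and lets pull_quic_header walk it, catching BufferReadError for truncated input (the ValueError cases for a cleared fixed bit or a short header are unchanged). A minimal sketch with a hypothetical parse_datagram helper:

from aioquic.packet import pull_quic_header
from aioquic.tls import Buffer, BufferReadError

def parse_datagram(datagram: bytes):
    buf = Buffer(data=datagram)
    try:
        return pull_quic_header(buf)
    except BufferReadError:
        return None  # datagram shorter than the header it announces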
|