{
    "module": "keras_hub.src.models.llama3.llama3_causal_lm",
    "class_name": "Llama3CausalLM",
    "config": {
        "backbone": {
            "module": "keras_hub.src.models.llama3.llama3_backbone",
            "class_name": "Llama3Backbone",
            "config": {
                "name": "llama_backbone",
                "trainable": true,
                "vocabulary_size": 128256,
                "num_layers": 32,
                "num_query_heads": 32,
                "hidden_dim": 4096,
                "intermediate_dim": 14336,
                "rope_max_wavelength": 500000.0,
                "rope_position_scaling_factor": 1.0,
                "rope_frequency_adjustment_factor": null,
                "rope_low_freq_factor": null,
                "rope_high_freq_factor": null,
                "rope_pretraining_sequence_length": null,
                "num_key_value_heads": 8,
                "layer_norm_epsilon": 1e-05,
                "dropout": 0
            },
            "registered_name": "keras_hub>Llama3Backbone"
        },
        "preprocessor": {
            "module": "keras_hub.src.models.llama3.llama3_causal_lm_preprocessor",
            "class_name": "Llama3CausalLMPreprocessor",
            "config": {
                "name": "llama3_causal_lm_preprocessor",
                "trainable": true,
                "dtype": {
                    "module": "keras",
                    "class_name": "DTypePolicy",
                    "config": {
                        "name": "bfloat16"
                    },
                    "registered_name": null
                },
                "tokenizer": {
                    "module": "keras_hub.src.models.llama3.llama3_tokenizer",
                    "class_name": "Llama3Tokenizer",
                    "config": {
                        "name": "llama3_tokenizer",
                        "trainable": true,
                        "dtype": {
                            "module": "keras",
                            "class_name": "DTypePolicy",
                            "config": {
                                "name": "int32"
                            },
                            "registered_name": null
                        },
                        "config_file": "tokenizer.json",
                        "sequence_length": null,
                        "add_prefix_space": false,
                        "unsplittable_tokens": [
                            "<|begin_of_text|>",
                            "<|start_header_id|>",
                            "<|eot_id|>",
                            "<|end_header_id|>",
                            "<|end_of_text|>"
                        ]
                    },
                    "registered_name": "keras_hub>Llama3Tokenizer"
                },
                "config_file": "preprocessor.json",
                "sequence_length": 1024,
                "add_start_token": true,
                "add_end_token": true
            },
            "registered_name": "keras_hub>Llama3CausalLMPreprocessor"
        },
        "name": "llama3_causal_lm"
    },
    "registered_name": "keras_hub>Llama3CausalLM"
}