# Llama-Ghanaba-AI / main.py
# Initialise a Llama 3.2 1B base model with a custom config, plus ViT and Wav2Vec2
# preprocessors, and push the result to the Hugging Face Hub.
from transformers import (LlamaConfig, LlamaForCausalLM, LlamaTokenizerFast,
                          ViTImageProcessorFast, Wav2Vec2FeatureExtractor, Wav2Vec2Processor)

# Load the customised config shipped in this repo, then initialise the Llama 3.2 1B
# base model and its tokenizer; passing config= overrides the checkpoint's own config.
config = LlamaConfig.from_pretrained("./config.json")
model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", config=config)
tokenizer = LlamaTokenizerFast.from_pretrained("meta-llama/Llama-3.2-1B")
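
# Sanity-check sketch (an addition, not part of the original script): round-trip a
# short prompt through the tokenizer to confirm it loaded; the text is made up.
sample = tokenizer("Akwaaba, Ghana!", return_tensors="pt")
print(tokenizer.decode(sample["input_ids"][0], skip_special_tokens=True))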

# Vision and audio preprocessors: a fast ViT image processor and a Wav2Vec2 feature
# extractor, the latter wrapped with the Llama tokenizer into a combined processor.
vit_extractor = ViTImageProcessorFast.from_pretrained("google/vit-base-patch16-224")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
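
# Usage sketch (an addition, not part of the original script): the Wav2Vec2 processor
# expects raw 16 kHz mono audio; a zero-filled array stands in for a real clip here.
import numpy as np

dummy_audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
audio_inputs = processor(dummy_audio, sampling_rate=16_000, return_tensors="pt")
print(audio_inputs["input_values"].shape)  # torch.Size([1, 16000])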

# Publish the model and tokenizer to the Hub repo.
model.push_to_hub("appohfaiths/Llama-Ghanaba-AI")
tokenizer.push_to_hub("appohfaiths/Llama-Ghanaba-AI")
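
# Note (an addition, not part of the original script): the preprocessors above are
# never published, so they cannot be reloaded from the Hub. The audio processor could
# be pushed the same way via transformers' PushToHubMixin; beware that ViT image
# processors and Wav2Vec2 feature extractors both serialize to
# preprocessor_config.json, so pushing both into one repo would overwrite one file
# with the other.
processor.push_to_hub("appohfaiths/Llama-Ghanaba-AI")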