tinyllama / model.yml
name: tinyllama
model: tinyllama:1B
version: 1
files:
  - llama_model_path: model.gguf
# Results Preferences
stop:
  - </s>
top_p: 0.95
temperature: 0.7
frequency_penalty: 0
presence_penalty: 0
max_tokens: 4096 # Infer from base config.json -> max_position_embeddings
stream: true # true | false
# Engine / Model Settings
ngl: 33 # Number of model layers to offload to GPU; infer from base config.json -> num_hidden_layers
ctx_len: 4096 # Infer from base config.json -> max_position_embeddings
engine: cortex.llamacpp
prompt_template: "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>"
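
For illustration, here is a minimal Python sketch of how the prompt_template above expands once its placeholders are filled. The simple string substitution is an assumption for demonstration purposes, not cortex.cpp's actual templating code.

# A minimal sketch of prompt_template substitution, assuming plain
# placeholder replacement (not cortex.cpp's real implementation).
template = "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>"

rendered = template.format(
    system_message="You are a helpful assistant.\n",
    prompt="What is TinyLlama?\n",
)
print(rendered)
# <|system|>
# You are a helpful assistant.
# <|user|>
# What is TinyLlama?
# <|assistant|>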
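Once the model is loaded under cortex.cpp, it can be queried through the engine's OpenAI-compatible chat completions API. The sketch below assumes a locally running server; the host, port, and endpoint path are assumptions and may differ between cortex.cpp versions, so check your local configuration.

# Hedged example: querying a locally served model via an
# OpenAI-compatible chat completions endpoint. The host and port
# below are assumptions; verify them against your cortex.cpp setup.
import requests

resp = requests.post(
    "http://127.0.0.1:39281/v1/chat/completions",
    json={
        "model": "tinyllama:1B",  # matches the `model` field above
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is TinyLlama?"},
        ],
        "stream": False,  # the config above defaults to stream: true
    },
    timeout=60,
)
print(resp.json()["choices"][0]["message"]["content"])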