Update README.md
README.md CHANGED

@@ -36,10 +36,8 @@ tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
 model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
 
 # Format the input as a chat template
-user_turn_1 = …
-assistant_turn_1 = …
-user_turn_2 = "How many times do the two flowers blossom in three years?"
-sample = [{'role': 'user', 'content': user_turn_1}, {'role': 'assistant', 'content': assistant_turn_1}, {'role': 'user', 'content': user_turn_2}]
+prompt = "What factors contributed to the fall of the Roman Empire?"
+sample = [{'role': 'user', 'content': prompt}]
 chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
 
 # Tokenize input and generate output
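For reference, here is a minimal end-to-end sketch of the updated README example. The loading and chat-template lines come straight from the diff; the generation settings (`max_new_tokens`, greedy decoding) are assumptions, since the hunk ends at the `# Tokenize input and generate output` comment, and running it requires a transformers version with Zamba2 support.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model as in the README
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained(
    "Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16
)

# Format the input as a chat template (the new single-turn example)
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)

# Tokenize input and generate output
# (the generation settings below are assumed, not taken from the diff;
# add_special_tokens=False avoids double-adding tokens the template already inserted)
inputs = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=150, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```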