pglo committed on
Commit
6fba98f
·
verified ·
1 Parent(s): 0b9ddaa

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -4
README.md CHANGED
@@ -36,10 +36,8 @@ tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
36
  model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
37
 
38
  # Format the input as a chat template
39
- user_turn_1 = "In one season a flower blooms three times. In one year, there is one blooming season. How many times do two flowers bloom in two years? Please include your logic."
40
- assistant_turn_1 = "In one season, a flower blooms three times. In one year, there is one blooming season. Therefore, in two years, there are two blooming seasons. Since each flower blooms three times in one season, in two blooming seasons, each flower will bloom six times. Since there are two flowers, the total number of times they will bloom in two years is 12."
41
- user_turn_2 = "How many times do the two flowers blossom in three years?"
42
- sample = [{'role': 'user', 'content': user_turn_1}, {'role': 'assistant', 'content': assistant_turn_1}, {'role': 'user', 'content': user_turn_2}]
43
  chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
44
 
45
  # Tokenize input and generate output
 
36
  model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
37
 
38
  # Format the input as a chat template
39
+ prompt = "What factors contributed to the fall of the Roman Empire?"
40
+ sample = [{'role': 'user', 'content': prompt}]
 
 
41
  chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
42
 
43
  # Tokenize input and generate output