Upload meta-llama_Llama-3.1-8B-Instruct_0.py with huggingface_hub
meta-llama_Llama-3.1-8B-Instruct_0.py
CHANGED
@@ -14,6 +14,34 @@
 try:
     from huggingface_hub import login
     login(new_session=False)
+
+    # Use a pipeline as a high-level helper
+    from transformers import pipeline
+
+    pipe = pipeline("text-generation", model="meta-llama/Llama-3.1-8B-Instruct")
+    messages = [
+        {"role": "user", "content": "Who are you?"},
+    ]
+    pipe(messages)
+
+    # Load model directly
+    from transformers import AutoTokenizer, AutoModelForCausalLM
+
+    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
+    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
+    messages = [
+        {"role": "user", "content": "Who are you?"},
+    ]
+    inputs = tokenizer.apply_chat_template(
+        messages,
+        add_generation_prompt=True,
+        tokenize=True,
+        return_dict=True,
+        return_tensors="pt",
+    ).to(model.device)
+
+    outputs = model.generate(**inputs, max_new_tokens=40)
+    print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
     with open('meta-llama_Llama-3.1-8B-Instruct_0.txt', 'w') as f:
         f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_0.txt')
 except Exception as e:
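Usage note (not part of the commit): by default `from_pretrained` loads full-precision weights, which is heavy for an 8B checkpoint. Below is a minimal sketch of the same direct-loading path using half precision and automatic device placement. `torch_dtype` and `device_map` are standard Transformers arguments; the assumption that `accelerate` is installed (needed for `device_map="auto"`) and the `model_id` variable name are illustrative, not taken from the commit.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "meta-llama/Llama-3.1-8B-Instruct"  # gated repo: requires an authorized HF token

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # half precision: roughly 16 GB of weights instead of 32 GB
    device_map="auto",           # requires `accelerate`; spreads layers across available devices
)

messages = [{"role": "user", "content": "Who are you?"}]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
# Decode only the newly generated tokens, skipping the prompt portion.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))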