Update README.md
README.md CHANGED
@@ -94,13 +94,14 @@ function's body.
 
 ```python
 # pip install -q transformers
+import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 checkpoint = "Deci/DeciCoder-1b"
 device = "cuda" # for GPU usage or "cpu" for CPU usage
 
 tokenizer = AutoTokenizer.from_pretrained(checkpoint)
-model = AutoModelForCausalLM.from_pretrained(checkpoint, trust_remote_code=True).to(device)
+model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16, trust_remote_code=True).to(device)
 
 inputs = tokenizer.encode("def print_hello_world():", return_tensors="pt").to(device)
 outputs = model.generate(inputs, max_new_tokens=100)
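For context beyond the hunk, here is a minimal sketch of how the updated snippet might be run end to end. The `torch.cuda.is_available()` fallback and the final decoding step are illustrative additions, not part of the commit.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "Deci/DeciCoder-1b"
# Assumption: fall back to CPU when no GPU is present (the README hardcodes "cuda").
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# The commit's change: load the weights in bfloat16, roughly halving memory
# versus float32 while keeping float32's exponent range.
model = AutoModelForCausalLM.from_pretrained(
    checkpoint, torch_dtype=torch.bfloat16, trust_remote_code=True
).to(device)

inputs = tokenizer.encode("def print_hello_world():", return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=100)

# Decoding falls outside the diff hunk; shown here to complete the example.
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```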