Update README.md
README.md CHANGED
@@ -14,6 +14,25 @@ language:
<img src="https://cdn-uploads.huggingface.co/production/uploads/64b63f8ad57e02621dc93c8b/3uLNwKHFwEgT2YQ-BGOiH.png" alt="drawing" width="600"/>
</p>

# How to use

```python
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B")
model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B", torch_dtype=torch.bfloat16, device_map="auto")

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# System prompt: "You are an AI assistant."
messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
# User message: "Who is the Prime Minister of the United Kingdom?"
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})

# Render the chat into a single prompt string using the model's chat template
prompt = tokenizer.apply_chat_template(conversation=messages, add_generation_prompt=True, tokenize=False)

# Greedy decoding (temperature is ignored when do_sample=False)
pipe(prompt, max_new_tokens=100, do_sample=False, temperature=0.0, return_full_text=False)
```
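If you prefer not to use the `pipeline` helper, the snippet below is a minimal sketch that reuses the `tokenizer`, `model`, and `prompt` objects from the example above and calls `model.generate()` directly; the generation settings are illustrative assumptions rather than recommendations from this model card.

```python
# Minimal sketch (assumes tokenizer, model, and prompt from the example above).
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)

# Decode only the newly generated tokens, skipping the prompt tokens.
new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
print(tokenizer.decode(new_tokens, skip_special_tokens=True))
```

Slicing off the prompt tokens before decoding mirrors `return_full_text=False` in the pipeline call above.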
# Base checkpoint
augmxnt/shisa-7b-v1
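For comparison, a minimal sketch (an illustration, not part of the original card) of loading the base checkpoint listed above with the same `transformers` API:

```python
# Illustrative only: load the base checkpoint listed in this model card.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

base_tokenizer = AutoTokenizer.from_pretrained("augmxnt/shisa-7b-v1")
base_model = AutoModelForCausalLM.from_pretrained(
    "augmxnt/shisa-7b-v1", torch_dtype=torch.bfloat16, device_map="auto"
)
```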