|
---
license: mit
tags:
- prompt
- gpt
- persona
datasets:
- fka/awesome-chatgpt-prompts
---
|
|
|
# Streaming Inference |
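
The example below loads the model in 4-bit with Unsloth and streams the generated tokens to stdout using transformers' `TextStreamer`: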
|
```python
from unsloth import FastLanguageModel
from transformers import TextStreamer

max_seq_length = 2048
dtype = None  # None for auto detection; float16 for Tesla T4/V100, bfloat16 for Ampere+
load_in_4bit = True

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "imranali291/gpt-base-prompt-generator",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
FastLanguageModel.for_inference(model)  # Enable native 2x faster inference

# The user message is the role/persona to generate a prompt for
messages = [
    {"role": "user", "content": "php developer"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    tokenize = True,
    add_generation_prompt = True,  # Must be added for generation
    return_tensors = "pt",
).to("cuda")

# Stream generated tokens to stdout as they are produced
text_streamer = TextStreamer(tokenizer)
_ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = 128, use_cache = True)
```
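
If you want to capture the streamed text programmatically (for example, to forward chunks to a UI or API response) rather than printing it, transformers also provides `TextIteratorStreamer`, which yields decoded chunks while generation runs in a background thread. A minimal sketch, reusing `model`, `tokenizer`, and `inputs` from above:

```python
from threading import Thread
from transformers import TextIteratorStreamer

# Streamer that yields decoded text chunks instead of printing them
streamer = TextIteratorStreamer(tokenizer, skip_prompt = True, skip_special_tokens = True)

# Run generation in a background thread so the stream can be consumed here
generation_kwargs = dict(input_ids = inputs, streamer = streamer, max_new_tokens = 128, use_cache = True)
thread = Thread(target = model.generate, kwargs = generation_kwargs)
thread.start()

generated_text = ""
for new_text in streamer:
    generated_text += new_text  # each chunk arrives as soon as it is decoded
thread.join()

print(generated_text)
```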