Update modeling_videochat_flash.py
modeling_videochat_flash.py (CHANGED)

```diff
@@ -679,7 +679,7 @@ class VideoChatFlashQwenForCausalLM(LlavaMetaForCausalLM, Qwen2ForCausalLM_Flash
 
         outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
         if outputs.endswith(stop_str):
-
+            outputs = outputs[: -len(stop_str)]
 
         outputs = outputs.strip()
 
```
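For context, this block trims the generation stop string from the decoded text before returning it; the added line is what actually removes the marker. Below is a minimal standalone sketch of that behavior, where `stop_str` and the decoded text are illustrative stand-ins rather than the model's actual values:

```python
# Minimal sketch of the stop-string trimming this commit restores.
# stop_str and the decoded text below are illustrative stand-ins,
# not the model's actual values.
stop_str = "<|im_end|>"
outputs = "The video shows a cat playing with a ball.<|im_end|>"

outputs = outputs.strip()
if outputs.endswith(stop_str):
    # Cut the stop marker off the end; without this line the marker
    # would leak into the returned answer.
    outputs = outputs[: -len(stop_str)]
outputs = outputs.strip()

print(outputs)  # The video shows a cat playing with a ball.
```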