Update README.md
README.md CHANGED
```diff
@@ -63,21 +63,21 @@ from qwen_vl_utils import process_vision_info
 
 # Load the model
 model = Qwen2VLForConditionalGeneration.from_pretrained(
-    "prithivMLmods/
+    "prithivMLmods/Imgscope-OCR-2B-0527",  # replace with updated model ID if available
     torch_dtype="auto",
     device_map="auto"
 )
 
 # Optional: Flash Attention for performance optimization
 # model = Qwen2VLForConditionalGeneration.from_pretrained(
-#     "prithivMLmods/
+#     "prithivMLmods/Imgscope-OCR-2B-0527",
 #     torch_dtype=torch.bfloat16,
 #     attn_implementation="flash_attention_2",
 #     device_map="auto",
 # )
 
 # Load processor
-processor = AutoProcessor.from_pretrained("prithivMLmods/
+processor = AutoProcessor.from_pretrained("prithivMLmods/Imgscope-OCR-2B-0527")
 
 messages = [
     {
```
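For context, the hunk ends just as the README's `messages` list begins, and the hunk header shows the snippet imports `process_vision_info` from `qwen_vl_utils`. Below is a minimal sketch of how the updated checkpoint ID would be used end to end, following the standard Qwen2-VL chat-template flow; the demo image URL and prompt are placeholders, not part of the diff.

```python
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

# Load model and processor with the checkpoint ID introduced in this commit
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "prithivMLmods/Imgscope-OCR-2B-0527",
    torch_dtype="auto",
    device_map="auto",
)
processor = AutoProcessor.from_pretrained("prithivMLmods/Imgscope-OCR-2B-0527")

# Placeholder message: one image plus an OCR-style instruction
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "https://example.com/sample_document.png"},
            {"type": "text", "text": "Read and transcribe the text in this image."},
        ],
    }
]

# Build model inputs following the standard Qwen2-VL pattern
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
).to(model.device)

# Generate and decode only the newly produced tokens
generated_ids = model.generate(**inputs, max_new_tokens=256)
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])
```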