Update README.md
README.md CHANGED
@@ -34,7 +34,7 @@ This is a LoRA (Low-Rank Adaptation) adapter for the Qwen2.5-VL-32B-Instruct mod
 
 ### Model Sources
 
-- **Repository:** [Mark-CHAE/shezhen](https://huggingface.co/Mark-CHAE/
+- **Repository:** [Mark-CHAE/shezhen](https://huggingface.co/Mark-CHAE/ViTCM-LLM)
 - **Base Model:** [Qwen/Qwen2.5-VL-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-32B-Instruct)
 
 ## Uses
@@ -96,7 +96,7 @@ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-VL-32B-Instruct")
 processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-32B-Instruct")
 
 # Load LoRA adapter
-model = PeftModel.from_pretrained(base_model, "Mark-CHAE/
+model = PeftModel.from_pretrained(base_model, "Mark-CHAE/ViTCM-LLM")
 
 # Prepare inputs
 image = Image.open("tongue_image.jpg")
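
For reference, the second hunk sits inside the README's usage snippet. A minimal runnable sketch of that snippet is below; the base-model load, the prompt text, and the generation settings are outside the diff and are assumptions here, while the adapter id is the corrected `Mark-CHAE/ViTCM-LLM`.

```python
# Sketch of the usage snippet the second hunk touches.
# The base-model load and the prompt are assumptions, not the README's exact code.
import torch
from PIL import Image
from peft import PeftModel
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

# Assumed base-model load (not shown in the diff)
base_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-32B-Instruct",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-32B-Instruct")

# Load LoRA adapter (repo id as corrected by this commit)
model = PeftModel.from_pretrained(base_model, "Mark-CHAE/ViTCM-LLM")

# Prepare inputs (prompt text is illustrative)
image = Image.open("tongue_image.jpg")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe the tongue in this image."},
        ],
    }
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)

# Generate a response
output_ids = model.generate(**inputs, max_new_tokens=128)
print(processor.batch_decode(output_ids, skip_special_tokens=True)[0])
```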