Update README.md
Browse files
README.md
CHANGED
@@ -82,7 +82,12 @@ conv.append_message(conv.roles[1], None)
|
|
82 |
prompt_question = conv.get_prompt()
|
83 |
|
84 |
# - Create model inputs
|
85 |
-
input_ids = tokenizer_image_token(
|
|
|
|
|
|
|
|
|
|
|
86 |
image_sizes = [image.size]
|
87 |
|
88 |
# - Generate model response
|
@@ -99,7 +104,11 @@ output = model.generate(
|
|
99 |
temperature=temperature if do_sample else None,
|
100 |
max_new_tokens=max_new_tokens,
|
101 |
)
|
102 |
-
output_parsed = tokenizer.decode(
|
|
|
|
|
|
|
|
|
103 |
|
104 |
# - Process response as you wish ...
|
105 |
# response = output_parsed.strip("\n").strip()
|
|
|
82 |
prompt_question = conv.get_prompt()
|
83 |
|
84 |
# - Create model inputs
|
85 |
+
input_ids = tokenizer_image_token(
|
86 |
+
prompt_question,
|
87 |
+
tokenizer,
|
88 |
+
IMAGE_TOKEN_INDEX,
|
89 |
+
return_tensors="pt"
|
90 |
+
).unsqueeze(0).to(model.device)
|
91 |
image_sizes = [image.size]
|
92 |
|
93 |
# - Generate model response
|
|
|
104 |
temperature=temperature if do_sample else None,
|
105 |
max_new_tokens=max_new_tokens,
|
106 |
)
|
107 |
+
output_parsed = tokenizer.decode(
|
108 |
+
output[0],
|
109 |
+
skip_special_tokens=True,
|
110 |
+
clean_up_tokenization_spaces=False
|
111 |
+
)
|
112 |
|
113 |
# - Process response as you wish ...
|
114 |
# response = output_parsed.strip("\n").strip()
|