sriggi committed · verified
Commit f333cb5 · Parent(s): a064035

Update README.md

Files changed (1):
  1. README.md +11 -2
README.md CHANGED
@@ -82,7 +82,12 @@ conv.append_message(conv.roles[1], None)
 prompt_question = conv.get_prompt()
 
 # - Create model inputs
-input_ids = tokenizer_image_token(prompt_question, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(model.device)
+input_ids = tokenizer_image_token(
+    prompt_question,
+    tokenizer,
+    IMAGE_TOKEN_INDEX,
+    return_tensors="pt"
+).unsqueeze(0).to(model.device)
 image_sizes = [image.size]
 
 # - Generate model response
@@ -99,7 +104,11 @@ output = model.generate(
     temperature=temperature if do_sample else None,
     max_new_tokens=max_new_tokens,
 )
-output_parsed= tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
+output_parsed= tokenizer.decode(
+    output[0],
+    skip_special_tokens=True,
+    clean_up_tokenization_spaces=False
+)
 
 # - Process response as you wish ...
 #response= output_parsed.strip("\n").strip()
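
For readers skimming the diff: the two hunks edit different steps of the same inference example in README.md, and the commit only reflows two long calls across multiple lines. A minimal sketch of how the snippet reads after this commit is given below; everything not visible in the diff (the import paths, the model/tokenizer/image/conversation setup, and the extra model.generate keyword arguments) is an assumption based on common LLaVA-style usage, not something this commit adds.

# Sketch only, not part of this commit. Assumes LLaVA-style helpers and that
# model, tokenizer, image, image_tensor, conv, do_sample, temperature and
# max_new_tokens were defined earlier in the README snippet.
from llava.constants import IMAGE_TOKEN_INDEX      # assumed import path
from llava.mm_utils import tokenizer_image_token   # assumed import path

prompt_question = conv.get_prompt()

# - Create model inputs
input_ids = tokenizer_image_token(
    prompt_question,
    tokenizer,
    IMAGE_TOKEN_INDEX,
    return_tensors="pt"
).unsqueeze(0).to(model.device)
image_sizes = [image.size]

# - Generate model response (only temperature, max_new_tokens and the closing
# parenthesis are visible in the diff; the other keyword arguments are assumed)
output = model.generate(
    input_ids,
    images=image_tensor,
    image_sizes=image_sizes,
    do_sample=do_sample,
    temperature=temperature if do_sample else None,
    max_new_tokens=max_new_tokens,
)
output_parsed = tokenizer.decode(
    output[0],
    skip_special_tokens=True,
    clean_up_tokenization_spaces=False
)

# - Process response as you wish ...
response = output_parsed.strip("\n").strip()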