---
pipeline_tag: image-to-text
---

## Usage

```python
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image

# Load the processor and model from the Hub.
processor = BlipProcessor.from_pretrained("prasanna2003/blip-image-captioning")

# Ensure the tokenizer has an EOS token before generating.
if processor.tokenizer.eos_token is None:
    processor.tokenizer.eos_token = '<|eos|>'
model = BlipForConditionalGeneration.from_pretrained("prasanna2003/blip-image-captioning")

image = Image.open('file_name.jpg').convert('RGB')

# Instruction-style prompt format expected by this model.
prompt = """Instruction: Answer the following input according to the image.
output: """

# Preprocess the image and prompt into model inputs.
inputs = processor(image, prompt, return_tensors="pt")

output = model.generate(**inputs, max_length=100)
print(processor.tokenizer.decode(output[0]))
```
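For faster generation on a machine with a CUDA GPU, the model and inputs can be moved to the device before calling `generate`. This is a minimal sketch, not part of the original card, and assumes the same checkpoint and variables as above:

```python
import torch

# Pick the GPU if one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

# Move every input tensor to the same device as the model.
inputs = {k: v.to(device) for k, v in inputs.items()}

# Disable gradient tracking for inference.
with torch.no_grad():
    output = model.generate(**inputs, max_length=100)

# skip_special_tokens drops markers such as the EOS token from the printed text.
print(processor.tokenizer.decode(output[0], skip_special_tokens=True))
```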