anon-repair-bot committed · Commit 3277324 · verified · 1 Parent(s): c052ad0

Fix: Add missing imports and URL image loading in example code


## Description

This PR fixes runtime errors in the example code of the model card.

## Changes

- Added missing imports:

  ```python
  import numpy as np
  import torch
  import torch.nn.functional as F
  import requests
  from io import BytesIO
  ```

- Defined the missing `model_input_size` variable:

  ```python
  model_input_size = [1024, 1024]
  ```

- Replaced `Image.open(image_path)`, which cannot open a URL, with code that downloads the image and opens it from memory (a standalone sketch of this pattern follows the list):

  ```python
  response = requests.get(image_path)
  orig_image = Image.open(BytesIO(response.content))
  ```
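
For reference, the replacement uses the standard `requests` + `BytesIO` pattern for handing remote image bytes to PIL, since `Image.open` expects a file path or file object rather than a URL. Below is a minimal standalone sketch of that pattern; the `timeout` argument and the `raise_for_status()` call are defensive additions for illustration and are not part of this patch.

```python
import requests
from io import BytesIO
from PIL import Image

image_path = "https://farm5.staticflickr.com/4007/4322154488_997e69e4cf_z.jpg"

# Download the image bytes over HTTP. The timeout and raise_for_status()
# call are illustrative hardening, not part of the patch itself.
response = requests.get(image_path, timeout=30)
response.raise_for_status()

# Wrap the downloaded bytes in a file-like object so PIL can decode them.
orig_image = Image.open(BytesIO(response.content))
print(orig_image.size, orig_image.mode)
```
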

## Testing
The updated example code has been tested and runs without errors.

## Note
This contribution is part of an ongoing research initiative to systematically identify and correct faulty example code in Hugging Face Model Cards.
We would appreciate a timely review and integration of this patch to support code reliability and enhance reproducibility for downstream users.

Files changed (1): README.md (+10 -1)
README.md CHANGED

````diff
@@ -124,6 +124,13 @@ pillow_image = pipe(image_path) # applies mask on input and returns a pillow image
 
 Or load the model
 ```python
+import numpy as np
+import torch
+import torch.nn.functional as F
+import requests
+from io import BytesIO
+from PIL import Image
+from skimage import io
 from transformers import AutoModelForImageSegmentation
 from torchvision.transforms.functional import normalize
 model = AutoModelForImageSegmentation.from_pretrained("briaai/RMBG-1.4",trust_remote_code=True)
@@ -153,6 +160,7 @@ model.to(device)
 image_path = "https://farm5.staticflickr.com/4007/4322154488_997e69e4cf_z.jpg"
 orig_im = io.imread(image_path)
 orig_im_size = orig_im.shape[0:2]
+model_input_size = [1024, 1024]
 image = preprocess_image(orig_im, model_input_size).to(device)
 
 # inference
@@ -163,7 +171,8 @@ result_image = postprocess_image(result[0][0], orig_im_size)
 
 # save result
 pil_mask_im = Image.fromarray(result_image)
-orig_image = Image.open(image_path)
+response = requests.get(image_path)
+orig_image = Image.open(BytesIO(response.content))
 no_bg_image = orig_image.copy()
 no_bg_image.putalpha(pil_mask_im)
 ```
````
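
For readers who want to try the corrected example outside the model card, here is a self-contained sketch of the full flow. It rests on stated assumptions: `preprocess_image` and `postprocess_image` below are illustrative stand-ins written for this sketch rather than the helpers defined in the RMBG-1.4 model card, and the inference call with its `result[0][0]` indexing follows the pattern shown in the card's example.

```python
import numpy as np
import requests
import torch
import torch.nn.functional as F
from io import BytesIO
from PIL import Image
from skimage import io
from torchvision.transforms.functional import normalize
from transformers import AutoModelForImageSegmentation


def preprocess_image(im: np.ndarray, model_input_size) -> torch.Tensor:
    # Illustrative stand-in, not the model card's helper: convert an HWC uint8
    # image to a normalized NCHW float tensor at the model's input resolution.
    if im.ndim == 2:
        im = im[:, :, np.newaxis]
    tensor = torch.tensor(im, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
    tensor = F.interpolate(tensor, size=model_input_size, mode="bilinear")
    tensor = tensor / 255.0
    return normalize(tensor, [0.5, 0.5, 0.5], [1.0, 1.0, 1.0])


def postprocess_image(result: torch.Tensor, orig_size) -> np.ndarray:
    # Illustrative stand-in, not the model card's helper: rescale the predicted
    # mask to the original image size and convert it to a uint8 array.
    while result.dim() < 4:
        result = result.unsqueeze(0)
    result = F.interpolate(result, size=orig_size, mode="bilinear")
    result = (result - result.min()) / (result.max() - result.min() + 1e-8)
    return (result * 255).squeeze().cpu().numpy().astype(np.uint8)


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AutoModelForImageSegmentation.from_pretrained(
    "briaai/RMBG-1.4", trust_remote_code=True
).to(device)

image_path = "https://farm5.staticflickr.com/4007/4322154488_997e69e4cf_z.jpg"
orig_im = io.imread(image_path)
orig_im_size = orig_im.shape[0:2]
model_input_size = [1024, 1024]
image = preprocess_image(orig_im, model_input_size).to(device)

# inference (output indexing follows the model card's example)
with torch.no_grad():
    result = model(image)
result_image = postprocess_image(result[0][0], orig_im_size)

# apply the predicted mask as the alpha channel of the original image
pil_mask_im = Image.fromarray(result_image)
response = requests.get(image_path)
orig_image = Image.open(BytesIO(response.content))
no_bg_image = orig_image.copy()
no_bg_image.putalpha(pil_mask_im)
no_bg_image.save("no_bg_image.png")
```

`putalpha` uses the single-channel mask directly as the alpha channel, so background pixels (mask value 0) become fully transparent in the saved PNG.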