Update README.md
README.md CHANGED
@@ -20,7 +20,7 @@ tags:
 
 # **siglip2-x384-explicit-content**
 
-> **siglip2-…
+> **siglip2-x256p32-explicit-content** is a vision-language encoder model fine-tuned from **siglip2-base-patch32-256** for **multi-class image classification**. Based on the **SiglipForImageClassification** architecture, this model is designed to detect and categorize various forms of visual content, from safe to explicit, making it ideal for content moderation and media filtering.
 
 ---
 
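For context, the description's content categories are exposed through the classifier head's label mapping rather than hard-coded anywhere in the snippet. A minimal sketch of how to inspect them, assuming the checkpoint's `id2label` config carries the categories listed in the Gradio description below (the index order in the comment is an assumption, not taken from the card):

```python
from transformers import SiglipForImageClassification

# Load the fine-tuned classifier; the head emits one logit per content class.
model = SiglipForImageClassification.from_pretrained(
    "prithivMLmods/siglip2-x256p32-explicit-content"
)

# The label names live in the model config -- read them from the checkpoint
# instead of hard-coding indices.
print(model.config.id2label)
# hypothetical output: {0: "Anime", 1: "Enticing", 2: "Hentai", 3: "Pornography", 4: "Safe"}
```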
@@ -55,7 +55,7 @@ from PIL import Image
 import torch
 
 # Load model and processor
-model_name = "prithivMLmods/siglip2-…
+model_name = "prithivMLmods/siglip2-x256p32-explicit-content"  # Replace with your HF model path if needed
 model = SiglipForImageClassification.from_pretrained(model_name)
 processor = AutoImageProcessor.from_pretrained(model_name)
 
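The hunks skip the README's unchanged lines 62-87, which is where `classify_explicit_content` (wired into the Gradio interface below) is defined. A minimal sketch of what that function plausibly looks like, given the `model`/`processor` pair above and the `gr.Label` output below; this is a reconstruction under those assumptions, not the card's actual code:

```python
import torch
from PIL import Image

def classify_explicit_content(image):
    """Return per-class probabilities for an image supplied as a numpy array."""
    # gr.Image(type="numpy") hands the function an ndarray; convert it for the processor.
    image = Image.fromarray(image).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")

    # Single forward pass; softmax turns the logits into probabilities.
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=-1).squeeze(0)

    # gr.Label expects a {label: confidence} mapping.
    return {model.config.id2label[i]: float(p) for i, p in enumerate(probs)}
```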
@@ -88,7 +88,7 @@ iface = gr.Interface(
     fn=classify_explicit_content,
     inputs=gr.Image(type="numpy"),
     outputs=gr.Label(num_top_classes=5, label="Predicted Content Type"),
-    title="siglip2-…
+    title="siglip2-x256p32-explicit-content",
     description="Classifies images as Anime, Hentai, Pornography, Enticing, or Safe for use in moderation systems."
 )
 
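The diff ends at the interface's closing parenthesis; the snippet presumably finishes by starting the app, which in Gradio is done with `launch()`. A usage sketch (assumed, not shown in the hunks):

```python
# Serve the moderation demo locally; add share=True for a temporary public URL.
iface.launch()
```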