Upload 4 files
- .gitattributes +1 -0
- app.py +91 -0
- banana_disease.py +132 -0
- banana_disease_densenet121.keras +3 -0
- class_names.npy +3 -0
.gitattributes
CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 banana_disease.keras filter=lfs diff=lfs merge=lfs -text
 banana_disease_model.keras filter=lfs diff=lfs merge=lfs -text
+banana_disease_densenet121.keras filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,91 @@
import gradio as gr
import tensorflow as tf
import numpy as np
from tensorflow.keras.applications.densenet import preprocess_input

# Load the saved model
model = tf.keras.models.load_model('banana_disease_densenet121.keras')

# Load class names
class_names = np.load("class_names.npy", allow_pickle=True)

# Preprocess the input image
def preprocess_image(img):
    if img is None:
        return None
    img = img.resize((256, 256))                              # Resize to match training
    img = preprocess_input(np.array(img, dtype=np.float32))   # Same DenseNet preprocessing as training
    img = np.expand_dims(img, axis=0)                         # Add batch dimension
    return img

# Prediction function
def predict_disease(img):
    img = preprocess_image(img)
    if img is None:
        return "⚠️ No image provided", None
    predictions = model.predict(img)[0]
    predicted_class = np.argmax(predictions)
    confidence_scores = {class_names[i]: float(predictions[i]) for i in range(len(class_names))}
    return f"Predicted: {class_names[predicted_class]}", confidence_scores


# 🌿 Custom theme
custom_theme = gr.themes.Soft()

# UI
with gr.Blocks(theme=custom_theme, css="""
#title {
    font-size: 32px;
    font-weight: bold;
    text-align: center;
    color: white;
    animation: fadeIn 2s ease-in-out;
}
#subtitle {
    text-align: center;
    font-size: 16px;
    color: #ccc;
    animation: fadeInUp 2s ease-in-out;
}
#prediction_box {
    font-size: 18px;
    padding: 12px;
    border-radius: 8px;
    min-height: 60px; /* ensures a readable area */
    word-wrap: break-word;
    white-space: normal;
}
.gr-button {
    transition: 0.3s;
}
.gr-button:hover {
    transform: scale(1.05);
    box-shadow: 0px 4px 20px rgba(0, 128, 0, 0.4);
}
@keyframes fadeIn {
    from {opacity: 0;}
    to {opacity: 1;}
}
@keyframes fadeInUp {
    from {opacity: 0; transform: translateY(10px);}
    to {opacity: 1; transform: translateY(0);}
}
""") as demo:

    gr.Markdown("<div id='title'>Banana Leaf Disease Classifier</div>")
    gr.Markdown("<div id='subtitle'>Upload a banana leaf image, and our AI will diagnose the disease</div>")

    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil", image_mode="RGB", label="Upload Banana Leaf")
            predict_btn = gr.Button("🔍 Predict Disease", variant="primary", elem_id="predict_btn")

        with gr.Column(scale=1):
            # Text output for the predicted class (gr.Text is an alias of gr.Textbox)
            output_label = gr.Text(label="Prediction", interactive=False, elem_id="prediction_box")
            with gr.Accordion("Confidence Scores", open=True):
                output_conf = gr.Label(label="Scores")

    predict_btn.click(fn=predict_disease, inputs=image_input, outputs=[output_label, output_conf])

# Launch app
demo.launch(share=True)
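Before launching the Gradio UI, the prediction function can be exercised directly as a quick sanity check. The snippet below is a minimal sketch, not part of the committed app: the file name sample_leaf.jpg is a placeholder for any local banana-leaf photo, and it assumes app.py has already been imported or run so that predict_disease and the model are defined.

# Quick local check of predict_disease (sketch; "sample_leaf.jpg" is a hypothetical test image)
from PIL import Image

test_img = Image.open("sample_leaf.jpg").convert("RGB")
label, scores = predict_disease(test_img)
print(label)                                                # e.g. "Predicted: <class name>"
print(sorted(scores.items(), key=lambda kv: -kv[1])[:3])    # top-3 confidence scores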
banana_disease.py
ADDED
@@ -0,0 +1,132 @@
import os
import shutil
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.applications.densenet import DenseNet121, preprocess_input

# ---------------------------
# Clear session
# ---------------------------
tf.keras.backend.clear_session()

# ---------------------------
# Paths
# ---------------------------
DATA_DIR = "/kaggle/input/Banana Disease Recognition Dataset/Original Images/Original Images"
BASE_DIR = "/kaggle/working/banana_split"
TRAIN_DIR = os.path.join(BASE_DIR, "train")
VAL_DIR = os.path.join(BASE_DIR, "val")

# ---------------------------
# Create train/val split
# ---------------------------
os.makedirs(TRAIN_DIR, exist_ok=True)
os.makedirs(VAL_DIR, exist_ok=True)

for cls in os.listdir(DATA_DIR):
    cls_path = os.path.join(DATA_DIR, cls)
    if not os.path.isdir(cls_path):
        continue
    os.makedirs(os.path.join(TRAIN_DIR, cls), exist_ok=True)
    os.makedirs(os.path.join(VAL_DIR, cls), exist_ok=True)

    files = [f for f in os.listdir(cls_path) if os.path.isfile(os.path.join(cls_path, f))]
    random.shuffle(files)
    split_idx = int(0.8 * len(files))

    for f in files[:split_idx]:
        shutil.copy(os.path.join(cls_path, f), os.path.join(TRAIN_DIR, cls, f))
    for f in files[split_idx:]:
        shutil.copy(os.path.join(cls_path, f), os.path.join(VAL_DIR, cls, f))

print("✅ Dataset successfully split into train & val folders")

# ---------------------------
# Parameters
# ---------------------------
IMG_SIZE = (256, 256)
BATCH_SIZE = 32
EPOCHS = 30

# ---------------------------
# Data generators
# ---------------------------
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=90,
    horizontal_flip=True,
    vertical_flip=True,
    zoom_range=0.2
)

val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

train_generator = train_datagen.flow_from_directory(
    TRAIN_DIR,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    color_mode="rgb"
)

val_generator = val_datagen.flow_from_directory(
    VAL_DIR,
    target_size=IMG_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    color_mode="rgb"
)

# ---------------------------
# Build model - DenseNet121
# ---------------------------
num_classes = train_generator.num_classes

base_model = DenseNet121(
    include_top=False,
    weights='imagenet',
    input_shape=(IMG_SIZE[0], IMG_SIZE[1], 3)
)

base_model.trainable = False  # Freeze initially

x = layers.GlobalAveragePooling2D()(base_model.output)
x = layers.Dropout(0.4)(x)
output = layers.Dense(num_classes, activation='softmax')(x)

model = models.Model(inputs=base_model.input, outputs=output)

model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss="categorical_crossentropy",
    metrics=["accuracy"]
)

model.summary()

# ---------------------------
# Callbacks
# ---------------------------
early_stop = EarlyStopping(monitor="val_loss", patience=7, restore_best_weights=True, verbose=1)
lr_reduce = ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=3, verbose=1)

# ---------------------------
# Train
# ---------------------------
history = model.fit(
    train_generator,
    validation_data=val_generator,
    epochs=EPOCHS,
    callbacks=[early_stop, lr_reduce]
)

# ---------------------------
# Save class names & model
# ---------------------------
np.save("class_names.npy", np.array(list(train_generator.class_indices.keys())))
model.save("banana_disease_densenet121.keras")
print("✅ Training complete. Model saved as 'banana_disease_densenet121.keras'")
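The committed script trains with the DenseNet121 base fully frozen ("Freeze initially"), which leaves room for an optional fine-tuning pass. The snippet below is a hedged sketch of how that second stage could look; it is not part of the repository. It unfreezes the upper part of the backbone, recompiles with a small learning rate, and continues training with the same generators and callbacks. The 100-layer cut-off, the 1e-5 learning rate, the 10 extra epochs, and the output filename are all assumptions.

# Optional fine-tuning stage (sketch, not in the committed script)
base_model.trainable = True
for layer in base_model.layers[:-100]:    # keep earlier layers frozen; cut-off is an assumption
    layer.trainable = False

model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),  # low LR to preserve pretrained features
    loss="categorical_crossentropy",
    metrics=["accuracy"]
)

fine_tune_history = model.fit(
    train_generator,
    validation_data=val_generator,
    epochs=10,                            # assumed number of additional epochs
    callbacks=[early_stop, lr_reduce]
)

model.save("banana_disease_densenet121_finetuned.keras")     # hypothetical filename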
banana_disease_densenet121.keras
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d3667cd9cf42475cb6c81488380fcf47d4776e64e50c2dae807d7755476b41c9
size 29746567
class_names.npy
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8603247d4942390725f3051f812f316528334d181bcc757fb3a74e0f5a25b553
size 1052
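Both binary artifacts are committed as Git LFS pointer files, so a plain checkout contains only the small pointers shown above rather than the model weights. Once the real files have been fetched via Git LFS, a short check like the one below, which is a sketch rather than part of the repository, confirms that the saved model and the class list agree with each other.

# Sanity check that the LFS artifacts resolved and are mutually consistent (sketch)
import numpy as np
import tensorflow as tf

class_names = np.load("class_names.npy", allow_pickle=True)
model = tf.keras.models.load_model("banana_disease_densenet121.keras")

print("classes:", list(class_names))
assert model.output_shape[-1] == len(class_names), "model outputs do not match class_names.npy"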