pacomesimon commited on
Commit
f69cf27
·
1 Parent(s): 6588ec1

debug: plot naming

Browse files
app.py CHANGED
@@ -1,11 +1,350 @@
 
 
1
  import gradio as gr
 
 
 
 
 
2
 
 
3
 
4
- def greet(name):
5
- return "Hello " + name + "!"
 
6
 
 
7
 
8
- demo = gr.Interface(fn=greet, inputs="textbox", outputs="textbox")
 
 
 
 
 
9
 
10
- if __name__ == "__main__":
11
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
  import gradio as gr
4
+ import time
5
+ from collections import deque
6
+ import matplotlib.pyplot as plt
7
+ from ultralytics import YOLO
8
+ import os
9
 
10
+ # Dummy comment to test push
11
 
12
def compare_images_optical_flow(img1, img2):
    """
    Compare two frames with dense optical flow and return the per-pixel
    flow magnitude.

    Args:
        img1: First (previous) frame, 3-channel uint8 image.
        img2: Second (current) frame, same shape as img1.

    Returns:
        A float grayscale image of RAW flow magnitudes. Normalization to
        0-1 is deliberately left to the caller (compute_optical_flow);
        the previous docstring wrongly claimed the output was normalized.
    """
    # Farneback flow operates on single-channel images.
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # Dense optical flow (Farneback): pyramid scale 0.5, 3 levels,
    # window 15, 3 iterations, poly_n 5, poly_sigma 1.2, no flags.
    flow = cv2.calcOpticalFlowFarneback(gray1, gray2, None, 0.5, 3, 15, 3, 5, 1.2, 0)

    # Euclidean magnitude of the (dx, dy) displacement vectors.
    return np.sqrt(flow[..., 0]**2 + flow[..., 1]**2)
37
+
38
# Open-vocabulary YOLO-World detector; the prompt list below steers it
# toward small airborne objects.
model = YOLO("yolov8s-world.pt")
# Define custom classes
CUSTOM_CLASSES = ["one bird", "one airplane", "one kite","a flying object","sky"]
model.set_classes(CUSTOM_CLASSES)

def detect_birds(image):
    """Run the YOLO-World detector on one frame and return the annotated image."""
    # conf=0.1: distant birds yield weak detections, so keep the
    # confidence threshold deliberately low.
    predictions = model(image, conf=0.1, verbose=False)
    return predictions[0].plot()
50
+
51
# Per-stage runtime samples (seconds); consumed by the histogram tab.
optical_flow_runtime = []
object_detection_runtime = []
change_detection_runtime = []

# Bundled demo clips offered through gr.Examples.
example_videos_folder = "./example_videos"
EXAMPLE_VIDEOS_LIST = [
    os.path.join(example_videos_folder, name)
    for name in os.listdir(example_videos_folder)
]

# Every acquired frame is resized to this resolution before processing.
HEIGHT_STANDARD = 480
WIDTH_STANDARD = 640

# Rolling buffer of the two most recent frames (filled by video_stream /
# the webcam stream callbacks).
frame_stack = deque(maxlen=2)
# Most recent (curr, prev, flow) triple queued for a detector consumer.
detection_stack = deque(maxlen=1)

# Neutral grey placeholder shown before any real frame is available.
fall_back_frame = np.full((256, 256, 3), 127, dtype=np.uint8)
# Last computed flow image; pre-initialized so the idle branch of
# compute_optical_flow has something to yield.
flow_magnitude_normalized = np.zeros((256, 256), dtype=np.uint8)

# Cross-generator coordination flags.
FLAGS = {"OBJECT_DETECTING": False}
# Stack of selected video paths; the last entry is the active source.
CAP = []
71
+
72
# Function to compute optical flow
def compute_optical_flow(mean_norm = None):
    """Generator: endlessly yield normalized optical-flow magnitude images.

    mean_norm: noise-tolerance threshold (defaults to 0.4). Frame pairs
    whose mean normalized flow is below it are queued on `detection_stack`
    for a detector consumer.
    """
    global FLAGS, flow_magnitude_normalized, frame_stack
    if mean_norm is None:
        mean_norm = .4
    else:
        # The UI slider value may arrive as a string; coerce to float.
        mean_norm = float(mean_norm)
    FLAGS["OBJECT_DETECTING"] = False
    while True:
        # Only compute while two frames are buffered and no detector holds
        # the shared state.
        if (len(frame_stack) > 1) and not(FLAGS["OBJECT_DETECTING"]): #

            prev_frame, curr_frame = frame_stack
            original_height, original_width = curr_frame.shape[:2]
            start_time = time.time() # Start timing
            # Downscale 4x per axis: Farneback flow is expensive.
            prev_frame_resized, curr_frame_resized = [
                cv2.resize(
                    frame,
                    (original_width // 4, original_height // 4)
                ) for frame in [prev_frame, curr_frame]
            ]
            flow_magnitude = compare_images_optical_flow(prev_frame_resized,
                                                         curr_frame_resized)
            end_time = time.time() # End timing
            optical_flow_runtime.append(end_time - start_time) # Append the elapsed time

            # Normalize to 0-1 and upscale back to display resolution.
            flow_magnitude_normalized = cv2.normalize(flow_magnitude, None, 0, 1, cv2.NORM_MINMAX, cv2.CV_32F)
            flow_magnitude_normalized = cv2.resize(
                flow_magnitude_normalized,
                (original_width, original_height)
            )
            yield flow_magnitude_normalized

            # Low mean flow -> scene calm enough to hand this pair to the
            # detection consumers.
            if flow_magnitude_normalized.mean() < mean_norm:
                detection_stack.append((curr_frame,prev_frame, flow_magnitude_normalized))
        else:
            # NOTE(review): indentation was lost in the diff rendering —
            # this `else` is assumed to pair with the OUTER `if` (idle
            # fallback), since `flow_magnitude_normalized` is
            # pre-initialized at module level for exactly this path;
            # confirm against the deployed app.
            # Tint the last magnitude red by zeroing the G and B channels.
            yield np.stack((flow_magnitude_normalized,flow_magnitude_normalized*0, flow_magnitude_normalized*0), axis=-1)
108
+
109
# Function to perform object detection
def object_detection_stream(classes = ""):
    """Generator: consume queued frame pairs and yield YOLO-annotated frames.

    classes: comma-separated open-vocabulary class prompts; a blank string
    falls back to the default flying-object prompt set.
    """
    if classes.strip() == "":
        classes = "one bird, one airplane, one kite,a flying object,sky"
    classes_list = classes.split(",")
    global FLAGS, fall_back_frame, model
    # Re-prime the YOLO-World detector with the requested vocabulary.
    model.set_classes(classes_list)

    detected_frame = fall_back_frame.copy()
    while True:
        if len(detection_stack)>0:
            # Pause the optical-flow producer while inference runs.
            FLAGS["OBJECT_DETECTING"] = True
            curr_frame, prev_frame, flow_magnitude_normalized = detection_stack.pop()
            frame = curr_frame
            start_time = time.time() # Start timing
            detected_frame = detect_birds(frame)
            end_time = time.time() # End timing
            object_detection_runtime.append(end_time - start_time) # Append the elapsed time
            FLAGS["OBJECT_DETECTING"] = False
        # NOTE(review): indentation lost in the diff — the yield and the
        # final flag reset are assumed to sit at loop level (re-yield the
        # last annotated frame while idle); confirm against the deployed app.
        yield detected_frame
        FLAGS["OBJECT_DETECTING"] = False
130
+
131
def change_detection_stream(useless_var = None):
    """Generator: yield frames with green boxes around motion regions.

    useless_var: placeholder so the gr.Interface wiring matches the other
    stream functions; its value is ignored.
    """
    detected_frame = fall_back_frame.copy()
    while True:
        if len(detection_stack)>0:
            # Pause the optical-flow producer while contouring runs.
            FLAGS["OBJECT_DETECTING"] = True
            curr_frame, prev_frame, flow_magnitude_normalized = detection_stack.pop()
            frame = curr_frame
            start_time = time.time() # Start timing
            # Binarize the 0-1 flow magnitude at 0.5 (127 of 255).
            ret, thresh = cv2.threshold((flow_magnitude_normalized*255).astype(np.uint8),
                                        127, 255, 0)
            # findContours returns (contours, hierarchy) on OpenCV 4 and
            # (image, contours, hierarchy) on OpenCV 3 — handle both.
            contours_tuple= cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            contours = contours_tuple[0] if len(contours_tuple) == 2 else contours_tuple[1]
            detected_frame = frame.copy()
            for contour in contours:
                # Green bounding box around each connected motion blob.
                x, y, w, h = cv2.boundingRect(contour)
                cv2.rectangle(detected_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            end_time = time.time() # End timing
            change_detection_runtime.append(end_time - start_time) # Append the elapsed time
            FLAGS["OBJECT_DETECTING"] = False
        # NOTE(review): indentation lost in the diff — yield/reset assumed
        # at loop level, mirroring object_detection_stream; confirm.
        yield detected_frame
        FLAGS["OBJECT_DETECTING"] = False
152
+
153
def video_stream(frame_rate = ""):
    """Generator: replay the most recently selected video, yielding frames.

    frame_rate: target frames/second as text from the UI; blank -> 2.0.
    Side effect: pushes each RGB frame, resized to the standard
    resolution, onto `frame_stack` for the optical-flow consumer, while
    yielding the full-resolution frame to the UI.
    """
    if frame_rate.strip() == "":
        frame_rate = 2.0
    else:
        frame_rate = float(frame_rate)
    if len(CAP) > 0:
        while True:
            # CAP[-1] is the most recently chosen video path; reopening it
            # on every pass makes the clip loop forever.
            # NOTE(review): the previous `cap` is never .release()d before
            # reopening — relies on garbage collection.
            cap = cv2.VideoCapture(CAP[-1])
            ret, frame = cap.read()
            while ret:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame_stack.append(
                    cv2.resize(
                        frame,
                        (WIDTH_STANDARD, HEIGHT_STANDARD) # Resize the frame
                    )
                )
                yield frame
                ret, frame = cap.read()
                # Throttle playback to roughly the requested frame rate.
                time.sleep(1/frame_rate)
    else:
        # No video selected yet: yield the grey placeholder once; the
        # generator then ends (it does not wait for a later selection).
        yield fall_back_frame
175
+
176
def yield_frame(s):
    """Generator: endlessly yield the oldest buffered frame.

    s: unused input, present only to satisfy the streaming wiring.
    Raises IndexError if called before any frame has been buffered.
    """
    while True:
        yield frame_stack[0]
179
# Gradio interface
with gr.Blocks() as demo:
    # --- Tab 1: uploaded video + YOLO-World detection --------------------
    with gr.Tab("Using a custom Video"):
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    video = gr.Video(label="Video Source")
                with gr.Row():
                    examples = gr.Examples(
                        examples=EXAMPLE_VIDEOS_LIST,
                        inputs=[video],
                    )

            with gr.Column():
                webcam_img = gr.Interface(video_stream,
                                          inputs=gr.Textbox(label="Acquisition: Enter the frame rate", value = 2.0), #
                                          outputs="image")
        with gr.Row():
            with gr.Column():
                optical_flow_img = gr.Interface(compute_optical_flow,
                                                inputs=gr.Slider(label="Optical Flow: Noise Tolerance", minimum=0.0, maximum=1.0, value=0.4),
                                                outputs=gr.Image(),#,"image",
                                                )
            with gr.Column():
                detection_img = gr.Interface(object_detection_stream,
                                             inputs=gr.Textbox(label="Classes: Enter the classes", value = "one bird, one airplane, one kite,a flying object,sky"),
                                             outputs="image")

        # Selecting a video pushes its path; video_stream reads CAP[-1].
        video.change(
            fn=lambda video: CAP.append(video),
            inputs=[video],
        )

    # --- Tab 2: uploaded video + contour-based change detection ----------
    with gr.Tab("Using a custom Video (Change Detection)"):
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    video_CD = gr.Video(label="Video Source")
                with gr.Row():
                    examples_CD = gr.Examples(
                        examples=EXAMPLE_VIDEOS_LIST,
                        inputs=[video_CD],
                    )

            with gr.Column():
                webcam_img_CD = gr.Interface(video_stream,
                                             inputs=gr.Textbox(label="Acquisition: Enter the frame rate", value = 2.0), #
                                             outputs="image")
        with gr.Row():
            with gr.Column():
                optical_flow_img_CD = gr.Interface(compute_optical_flow,
                                                   inputs=gr.Slider(label="Optical Flow: Noise Tolerance", minimum=0.0, maximum=1.0, value=0.4),
                                                   outputs=gr.Image(),#,"image",
                                                   )
            with gr.Column():
                detection_img_CD = gr.Interface(change_detection_stream,
                                                inputs=gr.Textbox(label="Change detection", value = "DUMMY"),
                                                outputs="image")

        video_CD.change(
            fn=lambda video: CAP.append(video),
            inputs=[video_CD],
        )


    # --- Tab 3: live webcam + YOLO-World detection -----------------------
    with gr.Tab("Using a Real Time Camera"):
        with gr.Row():

            with gr.Column():
                webcam_img_RT = gr.Image(label="Webcam", sources="webcam")
                # Each streamed webcam frame is resized and buffered for
                # the optical-flow consumer.
                webcam_img_RT.stream(lambda s: frame_stack.append(
                                         cv2.resize(
                                             s,
                                             (WIDTH_STANDARD, HEIGHT_STANDARD)
                                         )
                                     ),
                                     webcam_img_RT,
                                     time_limit=15, stream_every=1.0,
                                     concurrency_limit=30
                                     )

            with gr.Column():
                optical_flow_img_RT = gr.Interface(compute_optical_flow,
                                                   inputs=gr.Slider(label="Optical Flow: Noise Tolerance", minimum=0.0, maximum=1.0, value=0.4),
                                                   outputs="image",
                                                   )


        with gr.Row():

            with gr.Column():
                detection_img_RT = gr.Interface(object_detection_stream,
                                                inputs=gr.Textbox(label="Classes: Enter the classes",
                                                                  value = "one bird, one airplane, one kite,a flying object,sky"),
                                                outputs="image")



    # --- Tab 4: live webcam + change detection ---------------------------
    with gr.Tab("Using a Real Time Camera (Change Detection)"):
        with gr.Row():

            with gr.Column():
                webcam_img_RT_CD = gr.Image(label="Webcam", sources="webcam")
                webcam_img_RT_CD.stream(lambda s: frame_stack.append(
                                            cv2.resize(
                                                s,
                                                (WIDTH_STANDARD, HEIGHT_STANDARD)
                                            )
                                        ),
                                        webcam_img_RT_CD,
                                        time_limit=15, stream_every=1.0,
                                        concurrency_limit=30
                                        )

            with gr.Column():
                optical_flow_img_RT_CD = gr.Interface(compute_optical_flow,
                                                      inputs=gr.Slider(label="Optical Flow: Noise Tolerance", minimum=0.0, maximum=1.0, value=0.4),
                                                      outputs="image",
                                                      )



        with gr.Row():

            with gr.Column():
                detection_img_RT_CD = gr.Interface(change_detection_stream,
                                                   inputs=gr.Textbox(label="Changes will be detected here",
                                                                     value = "DUMMY"),
                                                   outputs="image")

    # --- Tab 5: runtime histograms ---------------------------------------
    with gr.Tab("Runtime Histograms"):
        def plot_histogram(data, title, color):
            """Render a runtime histogram to disk and return it as an image.

            The plot is saved as '<title>.png' (lower-cased, spaces ->
            underscores) and read back with OpenCV so gr.Image receives a
            plain ndarray.
            """
            plt.figure(figsize=(9, 5))
            plt.hist(data, bins=30, color=color, alpha=0.7)
            plt.title(title)
            plt.xlabel('Runtime (seconds)')
            plt.ylabel('Frequency')
            plt.grid(True)
            plt.tight_layout()
            filename = title.replace(" ", "_").lower() + ".png"
            plt.savefig(filename)
            # FIX: close the figure — previously every refresh leaked a
            # matplotlib figure (pyplot keeps open figures alive forever).
            plt.close()
            if os.path.exists(filename):
                img_plt = cv2.imread(filename)
                return img_plt
            else:
                # Grey placeholder if the file could not be written.
                return np.zeros((256, 256, 3), dtype=np.uint8) + 127

        def update_optical_flow_plot():
            """Histogram of optical-flow runtimes collected so far."""
            return plot_histogram(np.array(optical_flow_runtime), 'Histogram of Optical Flow Runtime', 'blue')

        def update_object_detection_plot():
            """Histogram of object-detection runtimes collected so far."""
            return plot_histogram(object_detection_runtime, 'Histogram of Object Detection Runtime', 'green')

        def update_change_detection_plot():
            """Histogram of change-detection runtimes collected so far."""
            return plot_histogram(change_detection_runtime, 'Histogram of Change Detection Runtime', 'red')

        with gr.Row():
            optical_flow_image = gr.Image(update_optical_flow_plot, label="Optical Flow Runtime Histogram")
        with gr.Row():
            optical_flow_button = gr.Button("Update Optical Flow Histogram")
            optical_flow_button.click(fn=update_optical_flow_plot, outputs=optical_flow_image)
        with gr.Row():
            object_detection_image = gr.Image(update_object_detection_plot, label="Object Detection Runtime Histogram")
        with gr.Row():
            object_detection_button = gr.Button("Update Object Detection Histogram")
            object_detection_button.click(fn=update_object_detection_plot, outputs=object_detection_image)
        with gr.Row():
            change_detection_image = gr.Image(update_change_detection_plot, label="Change Detection Runtime Histogram")
        with gr.Row():
            change_detection_button = gr.Button("Update Change Detection Histogram")
            change_detection_button.click(fn=update_change_detection_plot, outputs=change_detection_image)
demo.launch(debug=True)
histogram.png CHANGED
histogram_of_change_detection_runtime.png ADDED
histogram_of_object_detection_runtime.png ADDED
histogram_of_optical_flow_runtime.png ADDED