-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbackend(v1).py
More file actions
677 lines (553 loc) · 29.8 KB
/
backend(v1).py
File metadata and controls
677 lines (553 loc) · 29.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
import os
import time
import psutil
# SECURITY NOTE(review): an API key value is hard-coded into the source and
# injected into the environment here — it should come from an external secret
# store or deployment configuration instead of living in the repository.
os.environ['OPENROUTER_API_KEY'] = 'API_KEY'
os.environ['OPENROUTER_REFERER_URL'] = 'http://localhost:5000'
# backend.py
from flask import Flask, render_template, request, jsonify, Response
from PIL import Image
import io
import numpy as np
import torch
import cv2
from transformers import pipeline, DetrImageProcessor, DetrForObjectDetection, AutoProcessor, AutoModelForVision2Seq, BitsAndBytesConfig
import os
import base64
from flask_cors import CORS
from openai import OpenAI
from flask import Response
#ngrok//////////////////////////////////////////
from pyngrok import ngrok
import os
# Terminate any existing ngrok processes
ngrok.kill()
# Set your authtoken (replace with your actual authtoken)
# SECURITY NOTE(review): a real-looking ngrok authtoken is committed here —
# rotate it and load it from an environment variable instead of hard-coding it.
ngrok.set_auth_token("2uj1hY6YCtwiD1IQ02001PyowHS_4sx6aBhBaN8yryoPx6Qad")
# Open a tunnel to your Flask app's port (default is 5000)
port = int(os.environ.get("PORT", 5000)) # Get port from environment or default
tunnel = ngrok.connect(port)
# Print the public URL
print("Public URL:", tunnel.public_url)
# Expose the tunnel URL via the environment so the index route can render it.
os.environ["NGROK_URL"] = tunnel.public_url
#//////////////////////////////////
# Flask app configured for a Colab-style layout (templates under /content).
app = Flask(__name__, template_folder="/content/templates")
CORS(app)
app.secret_key = os.urandom(24) # Add secret key for session management
# Create global variables to store last image and depth info
# NOTE(review): this module-level mutable state is shared across ALL clients;
# concurrent users of /process and /chat will overwrite each other's context.
last_image_b64 = None
last_depth_info_lines = None
conversation_history = {} # Store conversation history by session ID
last_scene_description = None # Store the last scene description
# Add variables for tracking model first load (used only for log labelling)
detr_first_load = True
unidepth_first_load = True
def log_memory_usage():
    """Print and return the current CPU RSS and peak GPU memory use, in MB."""
    rss_mb = psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024)
    if torch.cuda.is_available():
        gpu_mb = torch.cuda.max_memory_allocated() / (1024 * 1024)
    else:
        gpu_mb = 0
    print(f"Memory Usage - CPU: {rss_mb:.2f} MB, GPU: {gpu_mb:.2f} MB")
    return rss_mb, gpu_mb
#--- Device Setup ---
# Prefer CUDA when available; every model below is moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
#--- OpenAI Client Setup for Qwen ---
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")
OPENROUTER_REFERER_URL = os.environ.get("OPENROUTER_REFERER_URL", "http://localhost:5000")
OPENROUTER_TITLE = os.environ.get("OPENROUTER_TITLE", "My Flask App")
if not OPENROUTER_API_KEY:
    print("Warning: OPENROUTER_API_KEY environment variable not set. OpenAI calls will fail.")
# OpenAI-compatible client pointed at the OpenRouter gateway.
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY,
)
#--- Model Loading ---
from unidepth.models import UniDepthV2 # Import UniDepthV2
def load_detr():
    """Load the DETR object-detection processor and model, logging load time.

    Returns:
        (processor, model, id2label) — id2label maps class index -> class name.
    """
    global detr_first_load
    print("Loading DETR...")
    started = time.time()
    proc = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50", revision="no_timm")
    net = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", revision="no_timm").to(device)
    load_time = (time.time() - started) * 1000
    load_type = "First load" if detr_first_load else "Cached load"
    print(f"DETR Load Time ({load_type}): {load_time:.2f} ms")
    log_memory_usage()
    detr_first_load = False
    return proc, net, net.config.id2label
def load_depth_anything():
    """Load the UniDepthV2 depth-estimation model in eval mode, logging load time."""
    global unidepth_first_load
    print("Loading UniDepthV2...")
    started = time.time()
    depth_net = UniDepthV2.from_pretrained("lpiccinelli/unidepth-v2-vits14").to(device)
    depth_net.eval()  # inference only — disable training-mode layers
    load_time = (time.time() - started) * 1000
    load_type = "First load" if unidepth_first_load else "Cached load"
    print(f"UniDepthV2 Load Time ({load_type}): {load_time:.2f} ms")
    log_memory_usage()
    unidepth_first_load = False
    return depth_net
# Eagerly load both models at import time so the first request is not blocked
# by multi-second model downloads/initialization.
detr_processor, detr_model, id2label = load_detr()
unidepth_model = load_depth_anything() # Load UniDepthV2 model
def meters_to_feet(meters):
    """Convert a distance in meters to feet (1 m = 3.28084 ft)."""
    feet_per_meter = 3.28084
    return feet_per_meter * meters
def process_depth_info(depth_info_lines):
    """
    Normalize raw depth lines into a sorted, human-readable summary.

    Steps:
      1. Parse lines of the form "- <object>: center_depth=<meters>".
      2. Sort objects from closest to farthest by the *raw* depth value.
         (Previously the sort key was the rounded value, so a 1.4 m object
         could be listed after a 0.6 m one — both round to 1.)
      3. Emit each object with rounded meters and the feet equivalent.

    Args:
        depth_info_lines: List of strings like "- object: center_depth=X".

    Returns:
        A formatted multi-line string, or "" when there is nothing to report.
    """
    if not depth_info_lines:
        return ""
    import re  # local import preserved to avoid touching module-level imports
    pattern = re.compile(r"- (.*): center_depth=([\d.]+)")  # compiled once per call
    # Parse depth values and object names
    parsed = []
    for line in depth_info_lines:
        match = pattern.search(line)
        if match:
            object_name = match.group(1)
            depth_meters = float(match.group(2))
            parsed.append((depth_meters, object_name))
    # Sort by exact depth (closest first); ties broken by name for determinism.
    parsed.sort()
    # Format the output with rounded integers
    formatted_lines = ["Object-distance pairs:"]
    for depth_meters, object_name in parsed:
        depth_feet = meters_to_feet(depth_meters)
        formatted_lines.append(f"  {object_name}: {round(depth_meters)} meters ({round(depth_feet)} feet)")
    return "\n".join(formatted_lines)
def analyze_scene(image, detection_sensitivity=0.3):
    """Performs object detection and depth estimation using DETR + UniDepthV2.

    Args:
        image: Raw encoded image bytes (e.g. an uploaded JPEG).
        detection_sensitivity: Minimum DETR confidence for a detection to be
            kept and drawn (default 0.3).

    Returns:
        Tuple (boxes_b64, depth_b64, depth_info_lines):
            boxes_b64: base64 JPEG of the image annotated with boxes/labels.
            depth_b64: base64 JPEG of the colorized depth map.
            depth_info_lines: list of "- <class>: center_depth=<meters>" strings.
    """
    total_start_time = time.time()
    # Image Loading Time
    img_load_start = time.time()
    pil_image = Image.open(io.BytesIO(image))
    width, height = pil_image.size
    img_load_time = (time.time() - img_load_start) * 1000
    print(f"Image Loading Time: {img_load_time:.2f} ms")
    # --- DETR Object Detection ---
    detr_start_time = time.time()
    inputs = detr_processor(images=pil_image, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = detr_model(**inputs)
    # PIL .size is (W, H); DETR post-processing expects (H, W), hence [::-1].
    target_sizes = torch.tensor([pil_image.size[::-1]]).to(device)
    results = detr_processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=detection_sensitivity)[0]
    detr_time = (time.time() - detr_start_time) * 1000
    print(f"DETR Inference Time: {detr_time:.2f} ms")
    boxes, scores, labels = results["boxes"], results["scores"], results["labels"]
    # --- Depth Estimation using UniDepthV2 ---
    depth_start_time = time.time()
    # HWC uint8 -> 1x3xHxW float in [0, 1].
    # NOTE(review): assumes a 3-channel RGB input; a grayscale or RGBA upload
    # would break the permute — confirm callers guarantee RGB.
    rgb = torch.from_numpy(np.array(pil_image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0 # Prepare input for UniDepthV2
    rgb = rgb.to(device)
    with torch.no_grad():
        predictions = unidepth_model.infer(rgb)
    depth_map = predictions["depth"].squeeze().cpu().numpy() # Get depth map from predictions
    depth_time = (time.time() - depth_start_time) * 1000
    print(f"UniDepthV2 Inference Time: {depth_time:.2f} ms")
    # --- Visualization processing ---
    vis_start_time = time.time()
    depth_map_normalized = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    depth_map_color = cv2.applyColorMap(depth_map_normalized, cv2.COLORMAP_INFERNO)
    depth_map_display = Image.fromarray(depth_map_color)
    # --- Draw Bounding Boxes ---
    image_with_boxes = np.array(pil_image).copy()
    # Fixed seed so each class keeps the same box color across requests.
    np.random.seed(42)
    colors = np.random.randint(0, 255, size=(len(id2label), 3), dtype="uint8")
    depth_info_lines = []
    for i in range(len(boxes)):
        score = scores[i].item()
        label_idx = labels[i].item()
        class_name = id2label[label_idx]
        # Skip dining table detections (robust to different spellings)
        normalized_class = class_name.lower().replace(" ", "").replace("_", "").replace("-", "")
        if normalized_class == "diningtable":
            continue
        if score >= detection_sensitivity:
            x1, y1, x2, y2 = map(int, boxes[i].tolist())
            cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
            # Get raw depth value in meters (0.0 when the box center falls
            # outside the depth map bounds)
            center_depth_meters = depth_map[cy, cx] if 0 <= cy < depth_map.shape[0] and 0 <= cx < depth_map.shape[1] else 0.0
            # Convert meters to feet separately and round both
            center_depth_feet = meters_to_feet(center_depth_meters)
            # Round for display
            center_depth_meters_rounded = round(center_depth_meters)
            center_depth_feet_rounded = round(center_depth_feet)
            color = tuple(map(int, colors[label_idx]))
            cv2.rectangle(image_with_boxes, (x1, y1), (x2, y2), color, 2)
            label_str = f"{class_name} ({score:.2f})"
            (label_width, label_height), _ = cv2.getTextSize(label_str, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            # Filled background rectangle so the label text stays legible.
            cv2.rectangle(image_with_boxes, (x1, y1 - 20), (x1 + label_width, y1), color, -1)
            cv2.putText(image_with_boxes, label_str, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
            # Display both meters and feet with rounded values
            depth_label = f"Depth: {center_depth_meters_rounded}m / {center_depth_feet_rounded}ft"
            cv2.putText(image_with_boxes, depth_label, (x1, y2 + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1, cv2.LINE_AA)
            # Store raw (unrounded) depth value in meters for downstream parsing
            depth_info_lines.append(f"- {class_name}: center_depth={center_depth_meters}")
            print(f"Detected: {class_name}, Center Depth: {center_depth_meters_rounded}m, {center_depth_feet_rounded}ft")
    image_with_boxes_display = Image.fromarray(image_with_boxes)
    vis_time = (time.time() - vis_start_time) * 1000
    print(f"Visualization Processing Time: {vis_time:.2f} ms")
    # --- Convert PIL images to base64 ---
    encoding_start_time = time.time()
    buffered_boxes = io.BytesIO()
    image_with_boxes_display.save(buffered_boxes, format="JPEG")
    boxes_b64 = base64.b64encode(buffered_boxes.getvalue()).decode("utf-8")
    buffered_depth = io.BytesIO()
    depth_map_display.save(buffered_depth, format="JPEG")
    depth_b64 = base64.b64encode(buffered_depth.getvalue()).decode("utf-8")
    encoding_time = (time.time() - encoding_start_time) * 1000
    print(f"Image Encoding Time: {encoding_time:.2f} ms")
    total_time = (time.time() - total_start_time) * 1000
    print(f"Total Scene Analysis Time: {total_time:.2f} ms")
    log_memory_usage()
    return boxes_b64, depth_b64, depth_info_lines
def format_description(raw_description):
    """Prefix the raw model output with a "Scene Description:" header."""
    header = "Scene Description:\n\n"
    return header + raw_description
#--- System Prompt ---
# Persona prepended to every model conversation (both describe and chat calls).
SYSTEM_PROMPT = "You are an assistant developed by Team 5 designed to help blind people understand the environment around them."
def qwen_describe_image(image_file, user_input="Describe the image for a blind person in a structured way.", depth_info_lines=None):
    """Describes the image using Qwen2.5 VL via the OpenRouter API.

    Args:
        image_file: Raw encoded image bytes.
        user_input: Extra user request appended after the built-in instruction.
        depth_info_lines: Optional "- <class>: center_depth=<m>" lines; when
            given they are formatted via process_depth_info and appended so
            the model reports measured distances instead of guessing.

    Returns:
        The model's description string, or a user-facing error message when
        validation or the (retried) API call fails.
    """
    try:
        total_start_time = time.time()
        # Validate image file first
        if not image_file or len(image_file) == 0:
            print("Error: Empty image file received")
            return "I couldn't process the image. The image appears to be empty or corrupted."
        # Image validation - make sure we can actually open it
        try:
            pil_image = Image.open(io.BytesIO(image_file))
            pil_image.verify() # Verify image integrity
        except Exception as img_err:
            print(f"Image validation failed: {img_err}")
            return "I couldn't process the image. The image appears to be corrupted or in an unsupported format."
        # Image encoding time
        img_encoding_start = time.time()
        # Re-open from the original bytes: verify() leaves the Image object
        # unusable, so a fresh handle is required before saving.
        pil_image = Image.open(io.BytesIO(image_file))
        buffered = io.BytesIO()
        pil_image.save(buffered, format="JPEG")
        img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
        img_url = f"data:image/jpeg;base64,{img_base64}"
        img_encoding_time = (time.time() - img_encoding_start) * 1000
        print(f"Image Encoding Time (QwenVL): {img_encoding_time:.2f} ms")
        # Built-in instruction with a 1-shot example of the desired format.
        instruction = """Provide a VERY CONCISE description of the scene in the given image, aiming for approximately 3-5 sentences. Focus ONLY on the most important aspects of the scene, including the main objects, their relative positions, and approximate distances. Describe it in a way that is informative and easy to understand for someone who cannot see.
When describing objects, include both their approximate distance in meters and the equivalent distance in feet (rounded). Always integrate both measurements naturally within your sentences using the phrase "or" (e.g., "3 meters or 10 feet") rather than placing feet measurements in parentheses. Also include their general position or direction in the scene (e.g., "on the left," "in the background," "near the center," etc.), even if that spatial information is not explicitly provided.
Make sure to use the object-distance pairs exactly as provided below—do not estimate or infer your own distances. These are derived from depth data and should be reflected accurately in your description.
Below is an EXAMPLE of the concise format to follow. After receiving the actual image and depth data, respond according to what YOU see in the actual image, not the example:
EXAMPLE Input:
Object-distance pairs:
person: 3 meters (10 feet)
car: 4 meters (13 feet)
bicycle: 2 meters (7 feet)
dining table: 1.5 meters (5 feet)
traffic light: 7 meters (23 feet)
EXAMPLE Response:
"Urban street scene with clear path ahead. Person standing 3 meters or 10 feet directly in front of you. Bicycle parked 2 meters or 7 feet to your right. Car parked 4 meters or 13 feet away on the left side. Traffic light visible 7 meters or 23 feet ahead at intersection."
Now describe the ACTUAL image you're seeing, incorporating the object-distance information provided. Be extremely concise but informative about distances (mentioning both meters and feet) and object locations for navigation. Prioritize closest objects and potential obstacles. Avoid including clearly out-of-place objects (e.g., dining table on a street, or a classroom, or an office) unless they are visibly present in the image."""
        user_text = f"{instruction}\n\nUser request: {user_input}"
        # Add processed depth information if available
        if depth_info_lines:
            depth_info_text = process_depth_info(depth_info_lines)
            user_text += f"\n\n{depth_info_text}"
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT}, # System prompt added here
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": user_text
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": img_url
                        }
                    }
                ]
            }
        ]
        # Add retry logic for the API call
        max_retries = 3
        retry_delay = 3 # seconds
        for attempt in range(max_retries):
            try:
                # API call time
                api_start_time = time.time()
                completion = client.chat.completions.create(
                    extra_headers={
                        "HTTP-Referer": OPENROUTER_REFERER_URL,
                        "X-Title": OPENROUTER_TITLE,
                    },
                    model="qwen/qwen2.5-vl-72b-instruct:free",
                    messages=messages,
                    # NOTE(review): 5 s is far tighter than the 30 s used for
                    # chat calls; a 72B vision model often needs longer —
                    # confirm this is intentional (retries mask most timeouts).
                    timeout=5 # Add timeout for API call
                )
                api_time = (time.time() - api_start_time) * 1000
                print(f"Qwen2.5 VL API Call Time (Describe): {api_time:.2f} ms")
                result = completion.choices[0].message.content.strip()
                total_time = (time.time() - total_start_time) * 1000
                print(f"Total Image Description Time: {total_time:.2f} ms")
                log_memory_usage()
                return result
            except Exception as api_err:
                print(f"API call attempt {attempt+1}/{max_retries} failed: {api_err}")
                if attempt < max_retries - 1:
                    print(f"Retrying in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                    retry_delay *= 2 # Exponential backoff
                else:
                    raise # Re-raise the last exception if all retries failed
    except Exception as e:
        print(f"Error in qwen_describe_image: {e}")
        return "I couldn't generate a description for this image. There might be an issue with the connection or the image itself. Please try again in a moment."
def qwen_chat_text(user_message, image_base64=None, depth_info_lines=None, session_id=None):
    """Chat with Qwen2.5 via the OpenRouter API for text interactions.

    Args:
        user_message: The user's text (may already embed a follow-up prompt).
        image_base64: Optional base64 JPEG included for visual context.
        depth_info_lines: Optional raw depth lines, formatted and appended to
            the message text.
        session_id: Key into the module-level conversation_history; when set,
            the exchange is appended to (and truncated within) that history.

    Returns:
        The assistant's reply string, or a user-facing error message.
    """
    try:
        total_start_time = time.time()
        global conversation_history
        # Initialize conversation history for this session if it doesn't exist
        if session_id and session_id not in conversation_history:
            conversation_history[session_id] = [{"role": "system", "content": SYSTEM_PROMPT}]
        # Get existing conversation or create a new one
        messages = conversation_history.get(session_id, [{"role": "system", "content": SYSTEM_PROMPT}])
        content_list = []
        # Validate image if provided
        if image_base64:
            try:
                # Basic validation - check if it's a valid base64 string
                base64.b64decode(image_base64)
                img_url = f"data:image/jpeg;base64,{image_base64}"
                content_list.append({
                    "type": "image_url",
                    "image_url": {
                        "url": img_url
                    }
                })
                print("Including image in message for context")
            except Exception as img_err:
                print(f"Invalid image data for chat: {img_err}")
                # Continue without image rather than failing completely
        # Process and add depth information if available
        if depth_info_lines:
            depth_info_text = process_depth_info(depth_info_lines)
            user_message_with_depth = f"{user_message}\n\n{depth_info_text}"
        else:
            user_message_with_depth = user_message
        print(f"Chatbot Input Message: {user_message_with_depth}")
        content_list.append({
            "type": "text",
            "text": user_message_with_depth
        })
        # Add user message to history
        user_message_entry = {"role": "user", "content": content_list}
        if session_id:
            # If we're using history, append to existing history
            # Check if message count is getting too long and truncate if needed
            if len(messages) > 10: # Keep conversation reasonably sized
                # Keep system message and last 9 messages
                messages = [messages[0]] + messages[-9:]
            messages.append(user_message_entry)
        else:
            # For compatibility with existing code (no session)
            messages = [{"role": "system", "content": SYSTEM_PROMPT}, user_message_entry]
        # Add retry logic for API call
        max_retries = 3
        retry_delay = 2 # seconds
        for attempt in range(max_retries):
            try:
                # API call time
                api_start_time = time.time()
                completion = client.chat.completions.create(
                    extra_headers={
                        "HTTP-Referer": OPENROUTER_REFERER_URL,
                        "X-Title": OPENROUTER_TITLE,
                    },
                    model="qwen/qwen2.5-vl-72b-instruct:free",
                    messages=messages,
                    timeout=30 # Add timeout for API call
                )
                api_time = (time.time() - api_start_time) * 1000
                print(f"Qwen2.5 VL API Call Time (Chat): {api_time:.2f} ms")
                # Add assistant response to history
                assistant_response = completion.choices[0].message.content.strip()
                if session_id:
                    messages.append({"role": "assistant", "content": assistant_response})
                    # Persist the (possibly truncated) history for this session.
                    conversation_history[session_id] = messages
                total_time = (time.time() - total_start_time) * 1000
                print(f"Total Chat Response Time: {total_time:.2f} ms")
                log_memory_usage()
                return assistant_response
            except Exception as api_err:
                print(f"API call attempt {attempt+1}/{max_retries} failed: {api_err}")
                if attempt < max_retries - 1:
                    print(f"Retrying in {retry_delay} seconds...")
                    time.sleep(retry_delay)
                    retry_delay *= 2 # Exponential backoff
                else:
                    raise # Re-raise the last exception if all retries failed
    except Exception as e:
        print(f"Error in qwen_chat_text: {e}")
        return "I'm having trouble processing your request. This could be due to connection issues. Please try again in a moment."
@app.route('/chat', methods=['POST'])
def handle_chat():
    """Handle a chat message.

    Form fields:
        message: required user text.
        image_b64: optional base64 image for context (falls back to the last
            processed image so the model always has visual context).
        depth_info_lines: optional newline-joined depth lines (initial
            messages only).
        is_follow_up: "true" when asking about a previously described scene.
        session_id: conversation identifier; generated when absent.

    Returns:
        JSON {"response": ..., "session_id": ...}, or {"error": ...} with an
        appropriate HTTP status on failure.
    """
    print("Entering handle_chat function...")
    try:
        total_start_time = time.time()
        global last_image_b64, last_depth_info_lines, last_scene_description
        user_message = request.form.get('message')
        image_b64_chat = request.form.get('image_b64')
        depth_info_lines_chat = request.form.get('depth_info_lines')
        is_follow_up = request.form.get('is_follow_up', 'false').lower() == 'true'
        session_id = request.form.get('session_id', None)
        # Generate a session ID if not provided
        if not session_id:
            session_id = f"session_{len(conversation_history) + 1}"
        # Validate before mutating any module-level state.
        if not user_message:
            return jsonify({"error": "No message provided"}), 400
        # Use the freshly supplied image if present, otherwise the stored one.
        image_to_use = image_b64_chat if image_b64_chat else last_image_b64
        # Only process depth info for initial messages, not for follow-ups
        # (follow-ups rely on the scene description already in history).
        depth_info_lines_chat_list = None
        if not is_follow_up and depth_info_lines_chat:
            depth_info_lines_chat_list = depth_info_lines_chat.splitlines()
            last_depth_info_lines = depth_info_lines_chat_list
        elif not is_follow_up and last_depth_info_lines:
            depth_info_lines_chat_list = last_depth_info_lines
        # For follow-up questions, wrap the question in a 1-shot prompt that
        # embeds the previous scene description.
        # BUG FIX: this was previously a plain (non-f) string, so the
        # {last_scene_description} and {user_message} placeholders were sent
        # to the model literally instead of being interpolated; a stray "s"
        # after the quoted description is also removed.
        if is_follow_up and last_scene_description:
            user_message = f"""When answering follow-up questions about a previously described scene, provide specific details based on the information already shared, maintaining the same focus on distances (in both meters and feet) and spatial relationships.
EXAMPLE Previous Description: "Living room with couch 2 meters or 7 feet away on the left. Coffee table 1 meter or 3 feet directly in front. Bookshelf 3 meters or 10 feet on the right side."
EXAMPLE Follow-up Question: "What objects are closest to me?"
EXAMPLE Response: "The coffee table is closest to you at 1 meter or 3 feet directly in front of you, followed by the couch at 2 meters or 7 feet to your left."
Now briefly respond to the ACTUAL follow-up question based on this ACTUAL previous scene description that was provided to the user. IMPORTANT: Your response must be very concise and brief. Keep your answer short and to the point:
"{last_scene_description}"
Follow-up question: {user_message}"""
        print("About to call qwen_chat_text...")
        chatbot_response = qwen_chat_text(
            user_message,
            image_base64=image_to_use,
            depth_info_lines=depth_info_lines_chat_list,
            session_id=session_id
        )
        print("qwen_chat_text call returned successfully...")
        total_time = (time.time() - total_start_time) * 1000
        print(f"Total Chat Handler Time: {total_time:.2f} ms")
        return jsonify({
            "response": chatbot_response,
            "session_id": session_id
        })
    except Exception as e:
        print(f"Error in /chat route: {e}")
        return jsonify({"error": str(e)}), 500
@app.route('/process', methods=['POST'])
def process_image():
    """Processes an uploaded image and returns analysis results.

    Expects a multipart form with:
        image: the uploaded image file (required).
        sensitivity: DETR confidence threshold (default 0.3).
        session_id: optional conversation id; any existing history is reset.

    Returns:
        JSON with processed_image / depth_map (base64 JPEGs), description,
        depth_info_lines (newline-joined), and session_id — or an error.
    """
    try:
        total_start_time = time.time()
        global last_image_b64, last_depth_info_lines, last_scene_description
        # Check if image was uploaded
        if 'image' not in request.files:
            return jsonify({'error': 'No image uploaded'}), 400
        image_file = request.files['image'].read()
        # Validate image size
        if len(image_file) == 0:
            return jsonify({'error': 'Empty image file'}), 400
        # Basic image validation
        try:
            pil_image = Image.open(io.BytesIO(image_file))
            # Just checking if we can access basic properties
            _ = pil_image.size
        except Exception as img_err:
            print(f"Invalid image uploaded: {img_err}")
            return jsonify({'error': 'Invalid or corrupted image file'}), 400
        sensitivity = float(request.form.get('sensitivity', 0.3))
        session_id = request.form.get('session_id', None)
        # Clear conversation history for new sessions (a fresh image starts a
        # fresh conversation); otherwise mint a new session id.
        if session_id and session_id in conversation_history:
            del conversation_history[session_id]
        elif not session_id:
            session_id = f"session_{len(conversation_history) + 1}"
        # Process with explicit error handling so a partial result can still
        # be returned when only the description step fails.
        try:
            boxes_b64, depth_b64, depth_info_lines = analyze_scene(image_file, sensitivity)
            # Validate that analyze_scene produced valid output
            if not boxes_b64 or not depth_b64:
                raise ValueError("Scene analysis failed to produce valid output")
            # Pass depth_info_lines to qwen_describe_image
            raw_description = qwen_describe_image(image_file, depth_info_lines=depth_info_lines)
            if not raw_description:
                raw_description = "I can't describe this image right now. Please try again."
            formatted_description = format_description(raw_description)
            # Store the description for follow-up questions
            last_scene_description = raw_description
            depth_info_lines_str = "\n".join(depth_info_lines) if depth_info_lines else ""
            # Store the processed image and depth info for future chat messages
            last_image_b64 = boxes_b64
            last_depth_info_lines = depth_info_lines
            # Seed the conversation history so follow-up chat turns see the
            # annotated image and the initial description.
            if session_id:
                conversation_history[session_id] = [
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": [
                        {
                            "type": "image_url",
                            "image_url": {"url": f"data:image/jpeg;base64,{boxes_b64}"}
                        },
                        {"type": "text", "text": "What's in this image?"}
                    ]},
                    {"role": "assistant", "content": raw_description}
                ]
            total_time = (time.time() - total_start_time) * 1000
            print(f"Total Image Processing Time: {total_time:.2f} ms")
            log_memory_usage()
            return jsonify({
                'processed_image': boxes_b64,
                'depth_map': depth_b64,
                'description': formatted_description,
                'depth_info_lines': depth_info_lines_str,
                'session_id': session_id
            })
        except Exception as process_err:
            print(f"Error during image processing: {process_err}")
            # If there was an error in processing but we have a valid boxed
            # image, return that with a generic description (best-effort).
            if 'boxes_b64' in locals() and boxes_b64:
                return jsonify({
                    'processed_image': boxes_b64,
                    'depth_map': depth_b64 if 'depth_b64' in locals() and depth_b64 else "",
                    'description': "I detected objects in the image but couldn't generate a complete description. Please try again.",
                    'depth_info_lines': "\n".join(depth_info_lines) if 'depth_info_lines' in locals() and depth_info_lines else "",
                    'session_id': session_id
                })
            else:
                raise # Re-raise if we don't have any valid processed image
    except Exception as e:
        print(f"Error processing image: {e}")
        return jsonify({'error': str(e)})
@app.route('/')
def index():
    """Serve the main page, injecting the public ngrok URL into the template."""
    public_url = os.environ.get("NGROK_URL")
    if public_url:
        return render_template('index.html', ngrok_url=public_url)
    return "Ngrok URL not set. Please run ngrok separately and set the NGROK_URL environment variable."
if __name__ == '__main__':
    port = int(os.environ.get("PORT", 5000))
    # use_reloader=False: the reloader would re-import the module, re-running
    # the model loading and opening a second ngrok tunnel.
    app.run(debug=True, host='0.0.0.0', port=port, use_reloader=False)