-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
332 lines (262 loc) · 11.7 KB
/
app.py
File metadata and controls
332 lines (262 loc) · 11.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
"""MLX-VLM Receipt Scanner Service"""
import json
import logging
import os
import tempfile
from io import BytesIO
from flask import Flask, request, jsonify
from flask_cors import CORS
from PIL import Image
import mlx_vlm
import requests
import config
# Configure logging
# Root-level basicConfig so all module loggers share the same format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Initialize Flask app
app = Flask(__name__)
CORS(app)  # enable cross-origin requests so browser clients can call the API
# Global model cache
# model/processor start empty and are populated lazily by load_model()
# on the first request that needs inference.
model = None
processor = None
def load_model():
    """Load and cache the MLX-VLM model.

    The model and processor are loaded once on the first call (lazy
    loading) and the cached pair is returned on every call after that.

    Returns:
        Tuple of (model, processor).

    Raises:
        Exception: propagated from mlx_vlm.load if loading fails.
    """
    global model, processor
    if model is not None:
        return model, processor
    logger.info(f"Loading model: {config.MODEL_NAME}")
    try:
        model, processor = mlx_vlm.load(config.MODEL_NAME)
        logger.info("Model loaded successfully")
    except Exception as e:
        logger.error(f"Failed to load model: {e}")
        raise
    return model, processor
def allowed_file(filename):
    """Return True if *filename* carries an extension listed in
    config.ALLOWED_EXTENSIONS (comparison is case-insensitive)."""
    _, dot, extension = filename.rpartition('.')
    return bool(dot) and extension.lower() in config.ALLOWED_EXTENSIONS
def resize_image_if_needed(image, max_dimension=None):
    """Downscale *image* so neither side exceeds *max_dimension*.

    Aspect ratio is preserved; the longer side is pinned to the limit and
    the shorter side scaled proportionally.

    Args:
        image: PIL Image object
        max_dimension: Maximum width or height allowed (uses config.MAX_IMAGE_DIMENSION if None)

    Returns:
        The original image when it already fits, otherwise a resized copy.
    """
    limit = config.MAX_IMAGE_DIMENSION if max_dimension is None else max_dimension
    width, height = image.size
    # Nothing to do when both sides fit within the limit.
    if max(width, height) <= limit:
        logger.info(f"Image size {width}x{height} is within limits ({limit}px max), no resize needed")
        return image
    # Pin the longer side to the limit and scale the other proportionally.
    if width > height:
        new_width = limit
        new_height = int(height * (limit / width))
    else:
        new_height = limit
        new_width = int(width * (limit / height))
    logger.info(f"Resizing image from {width}x{height} to {new_width}x{new_height} (max: {limit}px)")
    # LANCZOS gives high-quality downsampling.
    return image.resize((new_width, new_height), Image.LANCZOS)
def extract_json_from_response(text):
    """Pull a JSON object out of a model response string.

    Fenced ```json code blocks are tried first (the first one that parses
    wins); otherwise the outermost brace-delimited span is parsed.

    Raises:
        json.JSONDecodeError: the brace-delimited fallback span was not
            valid JSON.
        ValueError: no brace-delimited span exists at all.
    """
    import re

    stripped = text.strip()
    # Prefer explicit ```json fences; accept the first block that parses.
    for candidate in re.findall(r'```json\s*(\{.*?\})\s*```', stripped, re.DOTALL):
        try:
            return json.loads(candidate.strip())
        except json.JSONDecodeError:
            pass
    # Fall back to the widest {...} span in the raw text.
    first_brace = stripped.find('{')
    last_brace = stripped.rfind('}')
    if first_brace != -1 and last_brace != -1:
        candidate = stripped[first_brace:last_brace + 1]
        try:
            return json.loads(candidate)
        except json.JSONDecodeError as e:
            logger.error(f"JSON decode error: {e}")
            logger.error(f"Attempted to parse: {candidate[:500]}")
            raise
    raise ValueError("No valid JSON object found in response")
@app.route('/health', methods=['GET'])
def health_check():
    """Liveness probe: reports the configured model and whether it is loaded."""
    payload = {
        "status": "healthy",
        "model": config.MODEL_NAME,
        "model_loaded": model is not None,
    }
    return jsonify(payload)
@app.route('/extract', methods=['POST'])
def extract_receipt():
    """Extract receipt data from uploaded image or image URL.

    Accepts either:
      * a JSON body containing "image_url" (the image is downloaded), or
      * a multipart/form-data upload in the "image" field.

    The image is validated (content type, size, extension), converted to
    RGB, downscaled if oversized, written to a temporary PNG, then passed
    through the MLX-VLM model. The model's JSON answer is returned as the
    response body with status 200; all failures return {"error": ...}
    with 400 (bad input) or 500 (model/inference failure).
    """
    try:
        image = None
        image_source = None  # URL or uploaded filename; kept for logging context
        # Check if image URL is provided (JSON request)
        if request.is_json:
            data = request.get_json()
            image_url = data.get('image_url')
            if not image_url:
                return jsonify({"error": "No image_url provided in JSON body"}), 400
            logger.info(f"Processing image from URL: {image_url}")
            image_source = image_url
            # Download image from URL
            try:
                response = requests.get(image_url, timeout=10)
                response.raise_for_status()
                # Check content type before trusting the payload as an image
                content_type = response.headers.get('content-type', '')
                if not content_type.startswith('image/'):
                    return jsonify({"error": f"URL does not point to an image. Content-Type: {content_type}"}), 400
                # Check file size (whole body is already in memory here)
                file_size = len(response.content)
                if file_size > config.MAX_IMAGE_SIZE:
                    return jsonify({"error": f"Image too large. Max size: {config.MAX_IMAGE_SIZE} bytes"}), 400
                logger.info(f"Downloaded image ({file_size} bytes)")
                # Load image
                image = Image.open(BytesIO(response.content))
                if image.mode != 'RGB':
                    image = image.convert('RGB')
                # Resize if too large to prevent memory issues
                image = resize_image_if_needed(image)
            except requests.exceptions.RequestException as e:
                logger.error(f"Failed to download image from URL: {e}")
                return jsonify({"error": f"Failed to download image: {str(e)}"}), 400
            except Exception as e:
                logger.error(f"Failed to load image from URL: {e}")
                return jsonify({"error": "Invalid image from URL"}), 400
        # Check if image file is uploaded (multipart/form-data request)
        elif 'image' in request.files:
            file = request.files['image']
            if file.filename == '':
                return jsonify({"error": "No file selected"}), 400
            if not allowed_file(file.filename):
                return jsonify({"error": f"File type not allowed. Allowed types: {config.ALLOWED_EXTENSIONS}"}), 400
            # Check file size by seeking to the end of the upload stream
            file.seek(0, os.SEEK_END)
            file_size = file.tell()
            file.seek(0)  # rewind so file.read() below sees the whole payload
            if file_size > config.MAX_IMAGE_SIZE:
                return jsonify({"error": f"File too large. Max size: {config.MAX_IMAGE_SIZE} bytes"}), 400
            logger.info(f"Processing uploaded image: {file.filename} ({file_size} bytes)")
            image_source = file.filename
            # Load image
            try:
                image = Image.open(BytesIO(file.read()))
                # Convert to RGB if necessary
                if image.mode != 'RGB':
                    image = image.convert('RGB')
                # Resize if too large to prevent memory issues
                image = resize_image_if_needed(image)
            except Exception as e:
                logger.error(f"Failed to load image: {e}")
                return jsonify({"error": "Invalid image file"}), 400
        else:
            return jsonify({"error": "No image file or image_url provided"}), 400
        # At this point, we have a valid PIL Image object
        # Load model (lazy loading)
        try:
            model_obj, processor_obj = load_model()
        except Exception as e:
            logger.error(f"Model loading failed: {e}")
            return jsonify({"error": "Model initialization failed"}), 500
        # Save image temporarily for mlx_vlm (it takes a path, not a PIL object)
        # delete=False so the file survives the `with` block; removed in `finally`.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.png') as tmp_file:
            image.save(tmp_file.name)
            tmp_image_path = tmp_file.name
        try:
            # Verify image was saved
            logger.info(f"Temp image saved: {tmp_image_path}, exists: {os.path.exists(tmp_image_path)}, size: {os.path.getsize(tmp_image_path)} bytes")
            # Prepare messages in the format Qwen2-VL expects
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "image", "image": tmp_image_path},
                        {"type": "text", "text": config.USER_PROMPT}
                    ]
                }
            ]
            logger.info(f"Generating response from model with max_tokens={config.MAX_TOKENS}...")
            logger.info(f"Image path: {tmp_image_path}")
            # Apply chat template to format the prompt correctly
            # Get model config (some model objects may lack a .config attribute)
            model_config = model_obj.config if hasattr(model_obj, 'config') else {}
            prompt = mlx_vlm.apply_chat_template(
                processor_obj,
                model_config,
                messages,
                num_images=1
            )
            logger.info(f"Formatted prompt type: {type(prompt)}")
            result = mlx_vlm.generate(
                model_obj,
                processor_obj,
                prompt,
                image=tmp_image_path,
                max_tokens=config.MAX_TOKENS,
                temperature=config.TEMPERATURE,
                verbose=True
            )
            # Extract text from GenerationResult
            # NOTE: this rebinds `response` (previously the requests.Response
            # in the URL branch); from here on it holds the model output text.
            response = result.text if hasattr(result, 'text') else str(result)
            logger.info(f"Model generated {result.generation_tokens} tokens in {result.generation_tps:.1f} tokens/sec")
            # Extract JSON from response
            receipt_data = extract_json_from_response(response)
            logger.info("Successfully extracted receipt data")
            return jsonify(receipt_data), 200
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse JSON from model response: {e}")
            return jsonify({
                "error": "Failed to parse model response as JSON",
                "raw_response": response[:1000]  # Return first 1000 chars for debugging
            }), 500
        except ValueError as e:
            logger.error(f"No valid JSON in response: {e}")
            return jsonify({
                "error": f"Model inference failed: {str(e)}",
                "raw_response": response[:1000] if 'response' in locals() else "No response"
            }), 500
        except Exception as e:
            logger.error(f"Model inference failed: {e}")
            return jsonify({
                "error": f"Model inference failed: {str(e)}",
                "raw_response": response[:1000] if 'response' in locals() else "No response"
            }), 500
        finally:
            # Clean up temp file
            if os.path.exists(tmp_image_path):
                os.unlink(tmp_image_path)
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        return jsonify({"error": f"Internal server error: {str(e)}"}), 500
@app.route('/models', methods=['GET'])
def list_models():
    """Return the active model name alongside a list of recommended alternatives."""
    recommended = [
        "qwen/Qwen2-VL-2B-Instruct",
        "qwen/Qwen2-VL-7B-Instruct",
        "llava-hf/llava-1.5-7b-hf",
    ]
    return jsonify({
        "current_model": config.MODEL_NAME,
        "recommended_models": recommended,
    })
# Script entry point: run the Flask development server directly.
if __name__ == '__main__':
    logger.info(f"Starting MLX-VLM Receipt Scanner Service on {config.HOST}:{config.PORT}")
    logger.info(f"Model: {config.MODEL_NAME}")
    # Optionally preload model at startup
    # (set PRELOAD_MODEL=true to pay the load cost here rather than on the
    # first /extract request)
    if os.getenv("PRELOAD_MODEL", "false").lower() == "true":
        logger.info("Preloading model...")
        load_model()
    app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG)