Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file added __pycache__/flask_app.cpython-312.pyc
Binary file not shown.
37 changes: 37 additions & 0 deletions flask_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
import json

from flask import Flask, request, jsonify, send_file, render_template_string
from werkzeug.utils import secure_filename
from flask_cors import CORS
from dotenv import load_dotenv

Expand Down Expand Up @@ -337,6 +338,42 @@ def get_templates():
return jsonify({'error': str(e)}), 500


@app.route('/api/upload_template', methods=['POST'])
def upload_template():
    """Upload a new PPTX template.

    Expects a multipart/form-data POST with the file under the ``file`` key.
    Saves the upload into ``src/slidedeckai/pptx_templates`` and registers it
    in ``GlobalConfig.PPTX_TEMPLATE_FILES`` so it is selectable immediately.

    Returns:
        200 JSON ``{'success': True, 'template': <display name>}`` on success.
        400 JSON ``{'error': ...}`` for a missing, empty, or non-.pptx upload.
        500 JSON ``{'error': ...}`` on unexpected failure.
    """
    try:
        if 'file' not in request.files:
            return jsonify({'error': 'No file part'}), 400

        file = request.files['file']
        if file.filename == '':
            return jsonify({'error': 'No selected file'}), 400

        # Case-insensitive extension check so 'Deck.PPTX' is also accepted.
        if not file.filename.lower().endswith('.pptx'):
            return jsonify({'error': 'Invalid file type'}), 400

        # secure_filename() strips path separators and unsafe characters; it
        # can return '' (e.g. for '../..' style names) and may drop the
        # extension, so re-validate the sanitized result before saving.
        filename = secure_filename(file.filename)
        if not filename or not filename.lower().endswith('.pptx'):
            return jsonify({'error': 'Invalid file type'}), 400

        template_dir = pathlib.Path('src/slidedeckai/pptx_templates')
        template_dir.mkdir(parents=True, exist_ok=True)

        save_path = template_dir / filename
        file.save(save_path)

        # Register in GlobalConfig.  Slice off the 5-char '.pptx' suffix
        # (validated above) rather than str.replace(), which would also
        # mangle a '.pptx' substring appearing mid-name.
        template_name = filename[:-5].replace('_', ' ').title()
        GlobalConfig.PPTX_TEMPLATE_FILES[template_name] = {
            'file': save_path,
            'caption': 'Uploaded Template'
        }

        logger.info(f"✅ Template uploaded: {template_name}")
        return jsonify({'success': True, 'template': template_name})

    except Exception as e:
        logger.error(f"Upload failed: {e}", exc_info=True)
        return jsonify({'error': str(e)}), 500


@app.route('/api/health')
def health():
"""Health check endpoint"""
Expand Down
Binary file added src/slidedeckai/__pycache__/__init__.cpython-312.pyc
Binary file not shown.
Binary file added src/slidedeckai/__pycache__/_version.cpython-312.pyc
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
81 changes: 52 additions & 29 deletions src/slidedeckai/agents/core_agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
"""
import logging
import json
import re
from typing import List, Dict, Optional, Set
from pydantic import BaseModel, Field
from openai import OpenAI
Expand Down Expand Up @@ -585,16 +586,17 @@ def _assign_content_dynamically(self, specs: List, content_phs: List,
largest = sorted_phs[0]
primary_type = self._determine_content_type(enforced, largest)

search_query = self._llm_generate_search_query(
query, purpose, primary_type, "primary"
# CHANGED: Generate multiple queries (3-5)
search_queries = self._llm_generate_search_queries(
query, purpose, primary_type, "primary", count=4
)

specs.append(PlaceholderContentSpec(
placeholder_idx=largest['idx'],
placeholder_type=largest['type'],
content_type=primary_type,
content_description=f"{purpose} - primary",
search_queries=[search_query],
search_queries=search_queries,
position_group=largest.get('position_group', ''),
role="content",
dimensions={
Expand All @@ -607,21 +609,28 @@ def _assign_content_dynamically(self, specs: List, content_phs: List,
for i, ph in enumerate(sorted_phs[1:], 1):
area = ph.get('area', 0)

# IMPROVED: More variety in supporting content (Gap 2)
if area < 1:
ct = 'kpi'
elif area < 3:
ct = 'bullets'
elif area < 4:
# Alternate between bullets and kpi for small/medium boxes
# This ensures multi-column layouts get mixed content
if i % 2 == 0:
ct = 'kpi'
else:
ct = 'bullets'
else:
ct = 'bullets'

sq = self._llm_generate_search_query(query, purpose, ct, f"supporting_{i}")
# CHANGED: Generate multiple queries (2-3)
sqs = self._llm_generate_search_queries(query, purpose, ct, f"supporting_{i}", count=3)

specs.append(PlaceholderContentSpec(
placeholder_idx=ph['idx'],
placeholder_type=ph['type'],
content_type=ct,
content_description=f"{purpose} - supporting",
search_queries=[sq],
search_queries=sqs,
position_group=ph.get('position_group', ''),
role="content",
dimensions={
Expand Down Expand Up @@ -649,42 +658,56 @@ def _determine_content_type(self, enforced: str, ph: Dict) -> str:

return 'bullets'

def _llm_generate_search_query(self, main_query: str, purpose: str,
content_type: str, role: str) -> SearchQuery:
"""Existing - unchanged"""
prompt = f"""Generate a specific search query:
def _llm_generate_search_queries(self, main_query: str, purpose: str,
content_type: str, role: str, count: int = 1) -> List[SearchQuery]:
"""FIX #5: Generate multiple search queries"""
prompt = f"""Generate {count} specific search queries:

Main topic: {main_query}
Slide purpose: {purpose}
Content type: {content_type}
Role: {role}

Create a search query that will find relevant data for this specific need.

Return ONLY the search query text, nothing else."""
Create {count} distinct search queries that will find relevant data for this specific need.
Return ONLY valid JSON array of strings:
["query 1", "query 2"]"""

try:
response = self.client.chat.completions.create(
model=self.model,
messages=[
{"role": "system", "content": "Generate search queries."},
{"role": "system", "content": "Generate search queries. Return JSON array."},
{"role": "user", "content": prompt}
],
temperature=0.5,
max_tokens=50
temperature=0.7,
max_tokens=250
)

query_text = response.choices[0].message.content.strip().strip('"\'')
text = response.choices[0].message.content.strip()
# Extract JSON list if embedded
m = re.search(r"\[.*\]", text, re.DOTALL)
if m:
text = m.group(0)

return SearchQuery(
query=query_text,
purpose=f"{purpose} - {role}",
expected_source_type='research'
)
queries = json.loads(text)

if not isinstance(queries, list):
queries = [str(queries)]

except:
return SearchQuery(
query=f"{main_query} {content_type}",
purpose=purpose,
expected_source_type='research'
)
return [
SearchQuery(
query=q,
purpose=f"{purpose} - {role}",
expected_source_type='research'
) for q in queries[:count]
]

except Exception as e:
logger.warning(f"Search query generation failed: {e}")
return [
SearchQuery(
query=f"{main_query} {content_type} {i+1}",
purpose=purpose,
expected_source_type='research'
) for i in range(count)
]
95 changes: 94 additions & 1 deletion src/slidedeckai/agents/execution_orchestrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -367,12 +367,27 @@ def _generate_slide_smart(self, section, search_results: Dict,
'section_purpose': getattr(section, 'section_purpose', ''),
'bullet_points': []
}

# FIX #1: Enrich slide_json with content types for matcher
enforced = getattr(section, 'enforced_content_type', 'bullets')
if 'chart' in enforced:
slide_json['chart'] = True
elif 'table' in enforced:
slide_json['table'] = True

# Populate bullets from placeholder_specs descriptions when available
for spec in getattr(section, 'placeholder_specs', []) or []:
try:
desc = getattr(spec, 'content_description', None) or getattr(spec, 'content_type', None)
if desc:
slide_json['bullet_points'].append(str(desc))

# Enrich specific items if spec has content_type info
ctype = getattr(spec, 'content_type', '')
if 'chart' in ctype:
slide_json['chart'] = True
elif 'table' in ctype:
slide_json['table'] = True
except Exception:
continue

Expand All @@ -395,6 +410,29 @@ def _generate_slide_smart(self, section, search_results: Dict,
except Exception:
pass

# FIX: Override placeholder roles from Plan specs (connects core_agents decisions to execution)
if getattr(section, 'placeholder_specs', None):
for spec in section.placeholder_specs:
try:
pid = getattr(spec, 'placeholder_idx', None)
ctype = getattr(spec, 'content_type', None)

if pid is not None and ctype:
# Normalize role
new_role = None
if 'kpi' in ctype: new_role = 'kpi'
elif 'chart' in ctype: new_role = 'chart'
elif 'table' in ctype: new_role = 'table'
elif 'bullets' in ctype: new_role = 'content'

if new_role and pid in placeholder_map:
current_role = placeholder_map[pid].get('role')
if current_role != new_role:
placeholder_map[pid]['role'] = new_role
logger.info(f" → Role override from Plan for ph {pid}: {current_role} -> {new_role}")
except Exception:
pass

# PREPARE content for placeholders in parallel (only text/chart/table data generation)
prepared_content = self._prepare_section_content(section, placeholder_map, search_results)

Expand Down Expand Up @@ -622,7 +660,12 @@ def _fill_subtitle(self, placeholder, ph_id: int, section, search_results: Dict)
base_pt = 18
run.font.size = Pt(base_pt * 0.8)
except Exception:
run.font.size = Pt(14) # fallback
# FIX #5: Soft fallback based on template
try:
base = self.template_properties.get('default_fonts', {}).get('size', Pt(18)).pt
run.font.size = Pt(base * 0.8)
except:
run.font.size = Pt(14)
except Exception as e:
logger.debug(f"Font application failed: {e}")

Expand Down Expand Up @@ -1046,6 +1089,16 @@ def _fill_content(self, placeholder, ph_id: int, ph_info: Dict,
max_bullets=max_bullets
)

# FIX #4: Text overflow validation
if self._check_text_overflow(bullets, ph_info['area'], ph_info.get('width', 0), ph_info.get('height', 0)):
logger.warning(f" ⚠️ Overflow detected, regenerating shorter bullets...")
bullets = self.content_generator.generate_bullets(
section.section_title,
section.section_purpose,
relevant_facts,
max_bullets=max(3, max_bullets - 2) # Reduce count
)

text_frame = placeholder.text_frame
text_frame.clear()

Expand Down Expand Up @@ -1078,6 +1131,46 @@ def _fill_content(self, placeholder, ph_id: int, ph_info: Dict,
'status': 'filled'
}

def _check_text_overflow(self, bullets: List[str], area: float,
width_inches: float = 0, height_inches: float = 0) -> bool:
"""FIX #4: Validate if text fits in placeholder using precise metrics"""
# If dimensions not provided, fall back to area heuristic
if width_inches <= 0 or height_inches <= 0:
total_chars = sum(len(b) for b in bullets)
capacity = area * 50
if total_chars > capacity:
logger.info(f" Overflow check (area): {total_chars} > {capacity:.0f}")
return True
return False

# Precise calculation
# Assumptions for Calibri 18pt (typical body text)
font_points = 18
# Avg char width approx 0.5 * font_size for variable width font
char_width_inch = (font_points * 0.5) / 72.0
line_height_inch = (font_points * 1.2) / 72.0

available_lines = int(height_inches / line_height_inch)
chars_per_line = int(width_inches / char_width_inch)

if chars_per_line <= 0:
chars_per_line = 1

used_lines = 0
for b in bullets:
# Bullet char + indentation
b_len = len(b) + 4
lines = (b_len / chars_per_line) + 1 # Simplified wrapping
used_lines += lines
# Paragraph spacing
used_lines += 0.2 # extra space between bullets

if used_lines > available_lines:
logger.info(f" Overflow check (dim): {used_lines:.1f} lines > {available_lines} available")
return True

return False

def _calculate_max_bullets(self, area: float) -> int:
"""Existing logic - unchanged"""
if area < 3:
Expand Down
Binary file not shown.
Binary file not shown.
Loading