Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file added flask.log
Empty file.
130 changes: 128 additions & 2 deletions flask_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,7 @@ def create_plan():
query = data.get('query', '').strip()
template_key = data.get('template', 'Basic')
search_mode = data.get('search_mode', 'normal')
report_mode = data.get('report_type', 'report')
num_sections = data.get('num_sections', None)

if not query:
Expand Down Expand Up @@ -165,7 +166,8 @@ def create_plan():
# Use enhanced orchestrator
orchestrator = PlanGeneratorOrchestrator(
api_key=api_key,
search_mode=search_mode
search_mode=search_mode,
report_mode=report_mode
)

# Generate plan with enforced diversity
Expand Down Expand Up @@ -256,13 +258,21 @@ def execute_plan():

output_path = orchestrator.execute_plan(research_plan, output_path)

# Read execution log
log_path = str(output_path).replace('.pptx', '.execution.json')
execution_log = []
if os.path.exists(log_path):
with open(log_path, 'r') as f:
execution_log = json.load(f)

# Cache results
report_id = datetime.now().strftime('%Y%m%d_%H%M%S')
slides_cache[report_id] = {
'path': output_path,
'topic': query,
'template': template_key,
'plan_id': plan_id
'plan_id': plan_id,
'execution_log': execution_log
}

logger.info(f"✅ Slides generated: {report_id}")
Expand All @@ -284,6 +294,122 @@ def execute_plan():
}), 500


@app.route('/api/report/<report_id>/content', methods=['GET'])
def get_report_content(report_id):
    """Return the cached execution log for a report so the UI can edit it."""
    try:
        entry = slides_cache.get(report_id)
        if entry is None:
            return jsonify({'error': 'Report not found'}), 404

        payload = {
            'report_id': report_id,
            'execution_log': entry.get('execution_log', []),
        }
        return jsonify(payload)
    except Exception as e:
        logger.error(f"Content fetch failed: {e}")
        return jsonify({'error': str(e)}), 500

@app.route('/api/report/<report_id>/update', methods=['POST'])
def update_report_content(report_id):
    """Update report content from manual edits and regenerate the PPTX.

    Expects a JSON body of the form {"execution_log": [...]}. Rebuilds the
    presentation from the edited log into a fresh temp file and updates the
    cached path and log for this report.
    """
    try:
        if report_id not in slides_cache:
            return jsonify({'error': 'Report not found'}), 404

        # silent=True: a missing or malformed JSON body yields None instead
        # of a framework-raised 400/415, so the guard below can return the
        # endpoint's own consistent error shape (previously this crashed
        # with AttributeError and surfaced as a 500).
        data = request.get_json(silent=True) or {}
        updated_log = data.get('execution_log')

        if not updated_log or not isinstance(updated_log, list):
            return jsonify({'error': 'Invalid execution log'}), 400

        # Regenerate using the same template the report was built with.
        cached = slides_cache[report_id]
        template_key = cached['template']
        api_key = os.getenv('OPENAI_API_KEY')

        # Get template file
        template_file = GlobalConfig.PPTX_TEMPLATE_FILES[template_key]['file']

        # New output path (to avoid overwrite locking issues if any);
        # delete=False so the file survives after close for later download.
        temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
        output_path = pathlib.Path(temp.name)
        temp.close()

        orchestrator = ExecutionOrchestrator(
            api_key=api_key,
            template_path=template_file
        )

        # Generate from log
        output_path = orchestrator.generate_presentation_from_log(updated_log, output_path)

        # Update cache so subsequent downloads/chats see the edited content.
        slides_cache[report_id]['path'] = output_path
        slides_cache[report_id]['execution_log'] = updated_log

        return jsonify({'success': True, 'message': 'Presentation updated'})

    except Exception as e:
        logger.error(f"Update failed: {e}", exc_info=True)
        return jsonify({'error': str(e)}), 500

@app.route('/api/report/<report_id>/chat', methods=['POST'])
def chat_with_report(report_id):
    """Chat to modify a specific slide's content, then rebuild the PPTX.

    Expects a JSON body {"message": str, "slide_index": int}. The targeted
    entry of the cached execution log is regenerated via the LLM and the
    whole deck is rebuilt into a fresh temp file.
    """
    try:
        if report_id not in slides_cache:
            return jsonify({'error': 'Report not found'}), 404

        cached = slides_cache[report_id]
        # silent=True: a malformed/missing JSON body becomes {} instead of a
        # framework error, keeping this endpoint's error shape consistent.
        data = request.get_json(silent=True) or {}
        message = (data.get('message') or '').strip()
        slide_index = data.get('slide_index')

        if slide_index is None:
            return jsonify({'error': 'Slide selection required for chat'}), 400

        # An empty instruction would trigger a pointless LLM call; reject it.
        if not message:
            return jsonify({'error': 'Message is required'}), 400

        execution_log = cached.get('execution_log', [])

        # JSON clients may send the index as a string, and bool is an int
        # subclass — both previously raised TypeError (a 500) on comparison.
        if (not isinstance(slide_index, int) or isinstance(slide_index, bool)
                or slide_index < 0 or slide_index >= len(execution_log)):
            return jsonify({'error': 'Invalid slide index'}), 400

        # Get orchestrator
        template_key = cached['template']
        api_key = os.getenv('OPENAI_API_KEY')
        template_file = GlobalConfig.PPTX_TEMPLATE_FILES[template_key]['file']

        orchestrator = ExecutionOrchestrator(
            api_key=api_key,
            template_path=template_file
        )

        # Regenerate content for target slide
        target_slide = execution_log[slide_index]
        updated_slide = orchestrator.regenerate_slide_content(target_slide, message)
        execution_log[slide_index] = updated_slide

        # Rebuild PPTX into a fresh temp file (avoids overwrite locking).
        temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
        output_path = pathlib.Path(temp.name)
        temp.close()

        output_path = orchestrator.generate_presentation_from_log(execution_log, output_path)

        # Update cache
        slides_cache[report_id]['path'] = output_path
        slides_cache[report_id]['execution_log'] = execution_log

        return jsonify({
            'success': True,
            'message': 'Slide regenerated',
            'updated_slide': updated_slide
        })

    except Exception as e:
        logger.error(f"Chat failed: {e}", exc_info=True)
        return jsonify({'error': str(e)}), 500

@app.route('/api/download/<report_id>')
def download_report(report_id):
"""Download generated presentation"""
Expand Down
Binary file added src/slidedeckai/__pycache__/__init__.cpython-312.pyc
Binary file not shown.
Binary file added src/slidedeckai/__pycache__/_version.cpython-312.pyc
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
55 changes: 51 additions & 4 deletions src/slidedeckai/agents/content_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,9 +61,10 @@ def generate_subtitle(self, slide_title: str, purpose: str,
return "Analysis"

def generate_bullets(self, slide_title: str, purpose: str,
search_facts: List[str], max_bullets: int = 5) -> List[str]:
search_facts: List[str], max_bullets: int = 5,
max_words_per_bullet: int = 15) -> List[str]:
"""
Generate bullet points from search facts
Generate bullet points from search facts with strict length control
"""

facts_text = "\n".join(search_facts) if search_facts else "No data available"
Expand All @@ -78,7 +79,7 @@ def generate_bullets(self, slide_title: str, purpose: str,

Requirements:
- Generate EXACTLY {max_bullets} bullet points
- Each bullet: 10-20 words
- Each bullet MUST be under {max_words_per_bullet} words to fit layout
- Include QUANTITATIVE data (numbers, percentages)
- Professional, executive-level tone
- NO preamble, ONLY bullet points
Expand Down Expand Up @@ -259,4 +260,50 @@ def generate_kpi(self, slide_title: str, fact: str) -> Dict:

except Exception as e:
logger.error(f"KPI generation failed: {e}")
return {"value": "N/A", "label": slide_title[:20]}
return {"value": "N/A", "label": slide_title[:20]}

def generate_speaker_notes(self, slide_title: str, bullets: List[str], key_facts: List[str]) -> str:
    """
    Generate conversational speaker notes for one slide via the LLM.

    Uses the slide's visible bullets plus up to three supporting facts as
    context; on any API failure, returns a generic reminder string instead.
    """

    visual_content = "\n- ".join(bullets) if bullets else "N/A"
    supporting_data = "\n- ".join(key_facts[:3]) if key_facts else "N/A"

    prompt = f"""Generate speaker notes for this slide:

Title: {slide_title}

Visual Content:
- {visual_content}

Supporting Data:
- {supporting_data}

Requirements:
- Conversational tone ("Welcome to this slide...", "Here we see...")
- Explain the key points, don't just read them
- Add a transition sentence to the next topic if applicable
- Keep it under 150 words
- Professional and engaging

Return ONLY the speaker notes text."""

    messages = [
        {"role": "system", "content": "Generate professional speaker notes."},
        {"role": "user", "content": prompt},
    ]

    try:
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.7,
            max_tokens=250,
        )
        notes = response.choices[0].message.content.strip()
        logger.info(" ✓ Speaker notes generated")
        return notes
    except Exception as e:
        logger.error(f"Speaker notes generation failed: {e}")
        return f"Speaker notes for {slide_title}: Please cover the key points listed on the slide."
23 changes: 21 additions & 2 deletions src/slidedeckai/agents/core_agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,12 +51,23 @@ class ResearchPlan(BaseModel):
class PlanGeneratorOrchestrator:
"""FIX #1 & #6: Remove fallbacks, strengthen validation"""

def __init__(self, api_key: str, search_mode: str = 'normal'):
def __init__(self, api_key: str, search_mode: str = 'normal', report_mode: str = 'report'):
    """Configure the plan generator: OpenAI client, search depth, report style."""
    self.api_key = api_key
    self.search_mode = search_mode
    self.report_mode = report_mode
    self.client = OpenAI(api_key=api_key)
    self.model = "gpt-4o-mini"
    self.used_topics: Set[str] = set()

    # Per-mode style guidance that gets injected into analysis prompts.
    prompts = {
        'sales': "Focus on value proposition, customer benefits, market opportunity, and call to action. Tone: Persuasive and energetic.",
        'executive': "Focus on high-level strategy, key metrics, financial impact, and critical decisions. Tone: Concise and authoritative.",
        'professional': "Focus on industry standards, best practices, detailed analysis, and clear methodology. Tone: Formal and objective.",
        'report': "Focus on comprehensive coverage, data accuracy, structured findings, and detailed conclusions. Tone: Informative and balanced.",
    }
    self.mode_prompts = prompts

    # Unrecognized modes fall back to the neutral 'report' style.
    self.mode_prompt_add = prompts[report_mode] if report_mode in prompts else prompts['report']

def generate_plan(self, user_query: str, template_layouts: Dict,
num_sections: Optional[int] = None) -> ResearchPlan:
Expand Down Expand Up @@ -372,17 +383,25 @@ def _llm_generate_subtitle_guaranteed_unique(self, purpose: str, position: str,
# Keep all other existing methods unchanged
def _llm_deep_analysis(self, query: str) -> Dict:
"""Existing - unchanged"""

# Adjust for deep mode
aspect_count = "6-10" if self.search_mode == 'normal' else "10-15"
deep_instruction = "Perform a DEEP DRILL DOWN analysis." if self.search_mode == 'deep' else ""

prompt = f"""You are an expert business analyst. Analyze this presentation request:

"{query}"

Style/Mode: {self.mode_prompt_add}
{deep_instruction}

Your task:
1. Understand the MAIN SUBJECT (company, topic, product, etc.)
2. Understand the CONTEXT (financial report, market analysis, product launch, etc.)
3. Identify ALL DISTINCT ASPECTS that should be covered
- Think broadly: metrics, trends, comparisons, breakdowns, outlook, risks, etc.
- Be comprehensive but avoid overlap
- Aim for 6-10 distinct aspects
- Aim for {aspect_count} distinct aspects

Return ONLY valid JSON:
{{
Expand Down
Loading