Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 7 additions & 4 deletions mem_mcp_server/utils/summarizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ class HTTPOpenAISummarizer:
HTTP-based OpenAI summarizer that doesn't depend on the openai package
"""

def __init__(self, api_key: str = None, model: str = "gpt-4o-mini"):
def __init__(self, api_key: str = None, model: str = "qwen2:0.5b"):
"""
Initialize the HTTP OpenAI summarizer

Expand All @@ -48,7 +48,8 @@ def __init__(self, api_key: str = None, model: str = "gpt-4o-mini"):

self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
self.model = model
self.api_url = "https://api.openai.com/v1/chat/completions"
self.api_url = "http://127.0.0.1:11434/v1/chat/completions"
# self.api_url = "https://api.openai.com/v1/chat/completions"

LOGGER.info(f"API key set: {'Yes' if self.api_key else 'No'}")

Expand Down Expand Up @@ -125,7 +126,7 @@ def generate_summary(self, context: str) -> str:

def _get_json_system_prompt(self) -> str:
"""Get optimized system prompt that ensures JSON output"""
return f"""You are an expert development assistant specializing in analyzing commit history and creating detailed project summaries.
return f"""You are an expert development assistant specializing in analyzing commit history and creating detailed project summaries.

You must respond with a valid JSON object following this exact schema:

Expand All @@ -134,7 +135,7 @@ def _get_json_system_prompt(self) -> str:
Instructions for analysis:
1. Analyze each commit chronologically
2. Extract user requests and intents behind changes
3. Identify technical decisions and code patterns
3. Identify technical decisions and code patterns
4. Document file changes with specific details
5. Note errors encountered and how they were resolved
6. Track ongoing troubleshooting efforts
Expand Down Expand Up @@ -309,6 +310,8 @@ def create_summary_from_commits(
# Try AI summarization using HTTP client (no dependencies)
summarizer = HTTPOpenAISummarizer()
ai_summary = summarizer.generate_summary(context)
print("AI Summary Generated:")
print(ai_summary)

return {
"ai_generated_summary": ai_summary,
Expand Down
5 changes: 5 additions & 0 deletions memov/debugging/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
"""Debugging utilities for memov."""

from .llm_client import LLMClient

__all__ = ["LLMClient"]
152 changes: 152 additions & 0 deletions memov/debugging/llm_client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
"""
LLM client for document generation.

Provides a unified interface for calling various LLM providers.
Uses litellm for multi-provider support.
"""

import logging
from typing import Any, Dict, List, Optional

logger = logging.getLogger(__name__)


class LLMClient:
    """Client for interacting with LLM APIs via litellm.

    Degrades gracefully when litellm is not installed: ``available`` is set
    to False and every query returns an error dictionary instead of raising,
    so callers can fall back to a non-LLM mode.
    """

    def __init__(self, models: List[str], api_key: Optional[str] = None):
        """
        Initialize LLM client.

        Args:
            models: List of model names to use (the first entry is the
                default for query_multiple).
            api_key: Optional API key (litellm falls back to environment
                variables if not provided).
        """
        self.models = models
        self.api_key = api_key

        # litellm is an optional dependency: a missing install disables LLM
        # calls rather than crashing whoever imports this module.
        try:
            import litellm

            self.litellm = litellm
            self.available = True

            # Configure litellm with the explicit key, if one was given.
            if api_key:
                litellm.api_key = api_key

            # Suppress litellm's verbose logging.
            litellm.set_verbose = False

        except ImportError:
            logger.warning(
                "litellm not installed. Install with: pip install litellm\n"
                "Document generation will use fallback mode."
            )
            self.litellm = None
            self.available = False

    def query_single(
        self,
        model: str,
        prompt: str,
        system_prompt: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: int = 4000,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Query a single LLM model.

        Args:
            model: Model name.
            prompt: User prompt.
            system_prompt: Optional system prompt.
            temperature: Sampling temperature.
            max_tokens: Maximum tokens to generate.
            **kwargs: Additional arguments forwarded to litellm.completion.

        Returns:
            On success, a dictionary with 'content', 'model' and 'usage'
            keys; on failure, a dictionary with 'error' and empty 'content'.
        """
        if not self.available or not self.litellm:
            return {
                'error': 'LLM client not available',
                'content': ''
            }

        try:
            # Build the chat messages; the system prompt is optional.
            messages = []
            if system_prompt:
                messages.append({
                    "role": "system",
                    "content": system_prompt
                })
            messages.append({
                "role": "user",
                "content": prompt
            })

            # Call the LLM through litellm's provider-agnostic interface.
            response = self.litellm.completion(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                **kwargs
            )

            # Debug-level logging instead of print(): raw responses are
            # large and noisy, and stdout may be user-facing.
            logger.debug("LLM response: %s", response)

            # Extract the generated text from the first choice.
            content = response.choices[0].message.content

            return {
                'content': content,
                'model': model,
                # getattr guards providers whose response object has no
                # usage attribute; callers receive None in that case.
                'usage': getattr(response, 'usage', None)
            }

        except Exception as e:
            # Broad catch is deliberate: any provider/network failure is
            # reported to the caller as an error dict, not an exception.
            logger.error(f"Error calling LLM {model}: {e}")
            return {
                'error': str(e),
                'content': ''
            }

    def query_multiple(
        self,
        prompts: List[str],
        model: Optional[str] = None,
        system_prompt: Optional[str] = None,
        **kwargs
    ) -> List[Dict[str, Any]]:
        """
        Query the LLM once per prompt, sequentially.

        Args:
            prompts: List of user prompts.
            model: Model to use (default: first configured model, or
                "gpt-4o-mini" if none were configured).
            system_prompt: Optional system prompt applied to every query.
            **kwargs: Additional arguments forwarded to query_single.

        Returns:
            One response dictionary per prompt, in order.
        """
        if not model:
            model = self.models[0] if self.models else "gpt-4o-mini"

        return [
            self.query_single(
                model=model,
                prompt=prompt,
                system_prompt=system_prompt,
                **kwargs
            )
            for prompt in prompts
        ]
47 changes: 47 additions & 0 deletions memov/docgen/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
"""
Documentation generation module for code analysis and documentation.

This module provides tools for:
- Code parsing and analysis
- Document generation using LLM
- Mermaid diagram generation
- Multi-level documentation (commit, branch, repository)
- Git integration for commit/branch analysis
- Web preview server
"""

from .code_analyzer import CodeAnalyzer, ClassInfo, FunctionInfo, ModuleInfo
from .diagram_generator import DiagramGenerator, DiagramType
from .doc_generator import DocType, DocumentGenerator, DocumentStructure, GeneratedDocument
from .git_utils import CommitInfo, GitUtils

# Preview server is optional (requires starlette)
try:
    from .preview_server import PreviewServer, start_preview_server
    _HAS_PREVIEW_SERVER = True
except ImportError:
    # Export None placeholders so `from memov.docgen import PreviewServer`
    # still succeeds; callers must check _HAS_PREVIEW_SERVER (or None-ness)
    # before use.
    PreviewServer = None
    start_preview_server = None
    _HAS_PREVIEW_SERVER = False

__all__ = [
# Code Analysis
"CodeAnalyzer",
"ModuleInfo",
"ClassInfo",
"FunctionInfo",
# Document Generation
"DocumentGenerator",
"DocumentStructure",
"GeneratedDocument",
"DocType",
# Diagram Generation
"DiagramGenerator",
"DiagramType",
# Git Utilities
"GitUtils",
"CommitInfo",
# Preview Server (optional)
"PreviewServer",
"start_preview_server",
]
Loading