Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 25 additions & 25 deletions rag_service/core/assignment_context_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -291,28 +291,28 @@ async def refresh_cache(self, assignment_id: str) -> None:
print(f"\nError: {error_msg}")
raise Exception(error_msg)

def verify_settings(self) -> Dict:
    """Verify the RAG Settings configuration.

    Returns:
        A summary dict with ``status`` ("Valid"/"Invalid"/"Error"), the list of
        ``missing_settings``, and the current cache configuration. Any lookup
        failure is reported via the "Error" status rather than raised.
    """
    try:
        checks = {
            "base_url": bool(self.settings.base_url),
            "api_key": bool(self.settings.api_key),
            "api_secret": bool(self.settings.get_password('api_secret')),
            "endpoints": bool(self.settings.assignment_context_endpoint),
            "cache_config": bool(self.settings.cache_duration_days is not None),
        }

        # Collect the names of every check that came back falsy.
        absent = []
        for setting_name, present in checks.items():
            if not present:
                absent.append(setting_name)

        status = "Invalid" if absent else "Valid"
        return {
            "status": status,
            "missing_settings": absent,
            "cache_enabled": self.settings.enable_caching,
            "cache_duration": self.settings.cache_duration_days,
        }

    except Exception as exc:
        return {
            "status": "Error",
            "error": str(exc),
        }
# def verify_settings(self) -> Dict:
# """Verify RAG Settings configuration"""
# try:
# results = {
# "base_url": bool(self.settings.base_url),
# "api_key": bool(self.settings.api_key),
# "api_secret": bool(self.settings.get_password('api_secret')),
# "endpoints": bool(self.settings.assignment_context_endpoint),
# "cache_config": bool(self.settings.cache_duration_days is not None)
# }

# missing = [k for k, v in results.items() if not v]

# return {
# "status": "Valid" if not missing else "Invalid",
# "missing_settings": missing,
# "cache_enabled": self.settings.enable_caching,
# "cache_duration": self.settings.cache_duration_days
# }

# except Exception as e:
# return {
# "status": "Error",
# "error": str(e)
# }
61 changes: 19 additions & 42 deletions rag_service/core/feedback_processor.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,56 +10,23 @@ class FeedbackProcessor:
def __init__(self):
self.queue_manager = QueueManager()

async def process_feedback(self, request_id: str, feedback: Dict) -> None:
async def process_feedback(self, request_id: str, feedback: Dict, model_used: str, template_used: str) -> None:
"""Process and store feedback in Feedback Request DocType"""
try:
print(f"\n=== Processing Feedback for Request: {request_id} ===")

# Get the feedback request document
feedback_request = frappe.get_doc("Feedback Request", request_id)
print(f"Found Feedback Request: {feedback_request.name}")

# Format feedback for display
formatted_feedback = self.format_feedback_for_display(feedback)


print("\nUpdating Feedback Request fields...")
# Update document fields using db_set
feedback_request.db_set('status', 'Completed', update_modified=True)
feedback_request.db_set('generated_feedback', json.dumps(feedback, indent=2), update_modified=True)
feedback_request.db_set('feedback_summary', formatted_feedback, update_modified=True)
feedback_request.db_set('feedback_summary', feedback['overall_feedback'], update_modified=True)
feedback_request.db_set('completed_at', datetime.now(), update_modified=True)

# Get and set LLM model info
llm_settings = frappe.get_list(
"LLM Settings",
filters={"is_active": 1},
limit=1
)
if llm_settings:
feedback_request.db_set('model_used', llm_settings[0].name, update_modified=True)

# FIXED: Get universal template (no assignment_type filtering)
print("Getting universal template...")
try:
# Get any active template (same logic as langchain_manager.py)
templates = frappe.get_list(
"Prompt Template",
filters={"is_active": 1}, # Only filter by active status
order_by="version desc",
limit=1
)

if templates:
feedback_request.db_set('template_used', templates[0].name, update_modified=True)
print(f"Using universal template: {templates[0].name}")
else:
print("No active template found - leaving template_used empty")
feedback_request.db_set('template_used', '', update_modified=True)

except Exception as template_error:
print(f"Error getting template: {str(template_error)}")
# Don't fail the entire process for template tracking issues
feedback_request.db_set('template_used', '', update_modified=True)
feedback_request.db_set('model_used', model_used, update_modified=True)
feedback_request.db_set('template_used', template_used, update_modified=True)

# Commit changes
frappe.db.commit()
Expand All @@ -70,6 +37,7 @@ async def process_feedback(self, request_id: str, feedback: Dict) -> None:
print(f"Status: {updated_doc.status}")
print(f"Has Generated Feedback: {bool(updated_doc.generated_feedback)}")
print(f"Has Feedback Summary: {bool(updated_doc.feedback_summary)}")
print(f"Model Used: {updated_doc.model_used}")
print(f"Template Used: {updated_doc.template_used}")

# Prepare and send message to TAP LMS
Expand All @@ -78,10 +46,19 @@ async def process_feedback(self, request_id: str, feedback: Dict) -> None:
"student_id": feedback_request.student_id,
"assignment_id": feedback_request.assignment_id,
"feedback": feedback,
"summary": formatted_feedback,
"summary": feedback['overall_feedback'],

"is_plagiarized": feedback['plagiarism_output']['is_plagiarized'],
"is_ai_generated": feedback['plagiarism_output']['is_ai_generated'],
"match_type": feedback['plagiarism_output']['match_type'],
"plagiarism_source": feedback['plagiarism_output']['plagiarism_source'],
"similarity_score": feedback['plagiarism_output']['similarity_score'],
"ai_detection_source": feedback['plagiarism_output']['ai_detection_source'],
"ai_confidence": feedback['plagiarism_output']['ai_confidence'],

"generated_at": feedback_request.completed_at.isoformat() if feedback_request.completed_at else datetime.now().isoformat(),
"plagiarism_score": feedback_request.plagiarism_score,
"similar_sources": json.loads(feedback_request.similar_sources or '[]')
# "plagiarism_score": feedback_request.plagiarism_score,
# "similar_sources": json.loads(feedback_request.similar_sources or '[]')
}

# Send to TAP LMS queue
Expand Down
197 changes: 179 additions & 18 deletions rag_service/core/langchain_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,12 @@ def setup_llm(self):
raise Exception("No active LLM configuration found")

settings = frappe.get_doc("LLM Settings", llm_settings[0].name)
self.model_used = llm_settings[0].name
print("\nUsing LLM Settings:")
print(f"Provider: {settings.provider}")
print(f"Model: {settings.model_name}")


# Create LLM provider based on settings
self.llm_provider = create_llm_provider(
provider=settings.provider,
Expand Down Expand Up @@ -121,27 +123,27 @@ def __init__(self):
self.template_name = "Built-in Universal Template"
self.system_prompt = """You are an expert educational feedback assistant that provides constructive, age-appropriate feedback on student submissions across all subjects and assignment types. You adapt your evaluation criteria and language based on the assignment context provided.

CRITICAL: You must ALWAYS respond with valid JSON, never plain text."""
CRITICAL: You must ALWAYS respond with valid JSON, never plain text."""

self.user_prompt = """Assignment Context:
Assignment Name: {assignment_name}
Subject Area: {course_vertical}
Assignment Type: {assignment_type}
Description: {assignment_description}
Assignment Name: {assignment_name}
Subject Area: {course_vertical}
Assignment Type: {assignment_type}
Description: {assignment_description}

Learning Objectives:
{learning_objectives}
Learning Objectives:
{learning_objectives}

Please analyze this student submission and provide feedback in the required JSON format."""
Please analyze this student submission and provide feedback in the required JSON format."""

self.response_format = """{
"overall_feedback": "Comprehensive feedback about the submission",
"strengths": ["Specific strength 1", "Specific strength 2", "Specific strength 3"],
"areas_for_improvement": ["Improvement area 1", "Improvement area 2"],
"learning_objectives_feedback": ["Feedback on learning objective 1"],
"grade_recommendation": 85,
"encouragement": "Encouraging message for the student"
}"""
"overall_feedback": "Comprehensive feedback about the submission",
"strengths": ["Specific strength 1", "Specific strength 2", "Specific strength 3"],
"areas_for_improvement": ["Improvement area 1", "Improvement area 2"],
"learning_objectives_feedback": ["Feedback on learning objective 1"],
"grade_recommendation": 85,
"encouragement": "Encouraging message for the student"
}"""

return BuiltinTemplate()

Expand Down Expand Up @@ -171,7 +173,7 @@ def get_default_response_format(self) -> Dict:
"encouragement": "Encouraging message for the student"
}

async def generate_feedback(self, assignment_context: Dict, submission_url: str, submission_id: str) -> Dict:
async def generate_feedback_universal(self, assignment_context: Dict, submission_url: str, submission_id: str) -> Dict:
"""Generate feedback using universal template approach"""
try:
print("\n=== Starting Universal Feedback Generation ===")
Expand Down Expand Up @@ -251,16 +253,175 @@ async def generate_feedback(self, assignment_context: Dict, submission_url: str,
# Create structured fallback response
feedback = self.create_fallback_feedback(assignment_context, expected_format)

# Attach default plagiarism/AI-detection metadata
plagiarism_output = {
"is_plagiarized": False,
"is_ai_generated": False,
"match_type": "original",
"plagiarism_source": "none",
"similarity_score": 0.0,
"ai_detection_source": "none",
"ai_confidence": 0.0,
"similar_sources": []
}
feedback["plagiarism_output"] = plagiarism_output

try:
if hasattr(template, 'name'):
template_used = template.name
else:
template_used = "Built-in Universal Template"

except Exception as template_error:
print("Used Default Template:")
template_used = "Built-in Universal Template"
# Don't fail the entire process for template tracking issues

print("\n=== Feedback Generation Completed Successfully ===")
return feedback
return feedback, template_used

except Exception as e:
error_msg = f"Error generating feedback for submission {submission_id}: {str(e)}"
print(f"\nError: {error_msg}")
frappe.log_error(message=error_msg, title="Feedback Generation Error")

# Return structured error response
return self.create_error_feedback(assignment_context)
template_used = "Built-in Universal Template for Error"
return self.create_error_feedback(assignment_context), template_used





async def generate_feedback(self, assignment_context: Dict, submission_url: str, submission_id: str,
                            plagiarism_data: Dict = None, feedback_request_id: str = None):
    """Generate feedback, short-circuiting for plagiarized / AI-generated work.

    Args:
        assignment_context: Assignment metadata forwarded to the feedback builders.
        submission_url: Location of the student submission.
        submission_id: Identifier used for logging in the universal path.
        plagiarism_data: Optional detection results. When absent or falsy the
            submission is treated as original work.
        feedback_request_id: Optional Feedback Request name; its result_status
            is updated as a side effect via _update_result_status.

    Returns:
        Tuple of (feedback dict, model name used, template name used).
        (Note: previous annotation said ``Dict`` but a 3-tuple was returned.)

    Raises:
        Re-raises any exception after recording a "Failed" result_status.
    """
    result_status = "Pending"

    try:
        # Default to "original" so a missing plagiarism_data falls through to
        # the universal path. BUG FIX: previously the whole decision tree was
        # nested under `if plagiarism_data:`, so calling without plagiarism
        # data left `feedback`/`template_used` unbound and raised NameError.
        is_plagiarized = False
        is_ai_generated = False
        match_type = "original"
        if plagiarism_data:
            is_plagiarized = plagiarism_data.get("is_plagiarized", False)
            is_ai_generated = plagiarism_data.get("is_ai_generated", False)
            match_type = plagiarism_data.get("match_type", "original")

        # Handle AI-generated submissions
        if is_ai_generated:
            result_status = "Success - Flagged"
            feedback = self._create_ai_generated_feedback(
                plagiarism_data,
                assignment_context
            )
            template_used = "Feedback Template for AI Generated Submission"

        # Handle plagiarized submissions
        elif is_plagiarized and match_type in ["exact_duplicate", "near_duplicate"]:
            result_status = "Success - Flagged"
            feedback = self._create_plagiarism_feedback(
                plagiarism_data,
                assignment_context
            )
            template_used = "Feedback Template for Plagiarized Submission"

        # Normal feedback generation for original work
        else:
            result_status = "Success - Original"
            feedback, template_used = await self.generate_feedback_universal(
                assignment_context, submission_url, submission_id
            )

        await self._update_result_status(feedback_request_id, result_status)
        return feedback, self.model_used, template_used

    except Exception as e:
        result_status = "Failed"
        await self._update_result_status(feedback_request_id, result_status, str(e))
        raise

async def _update_result_status(self, feedback_request_id: str, status: str, error_message: str = None):
    """Persist ``result_status`` (and optionally a truncated error) on a Feedback Request.

    No-op when ``feedback_request_id`` is falsy. Commits immediately after the
    write.
    """
    if not feedback_request_id:
        return

    fields = {"result_status": status}
    if error_message:
        # Cap the stored message so oversized tracebacks don't overflow the field.
        fields["error_message"] = error_message[:500]

    frappe.db.set_value(
        "Feedback Request",
        feedback_request_id,
        fields,
        update_modified=True,
    )
    frappe.db.commit()

def _create_ai_generated_feedback(self, plagiarism_data: Dict, assignment_context: Dict) -> Dict:
"""Create feedback for AI-generated submissions"""

ai_source = plagiarism_data.get("ai_detection_source", "unknown")
ai_confidence = plagiarism_data.get("ai_confidence", 0.0)
response = {
"overall_feedback": f"Your submission appears to be generated by an \
AI tool (detected source: {ai_source}, confidence: {ai_confidence:.0%}). \
At MentorMe, we encourage original creative work that reflects your own learning \
and artistic development. AI-generated images, while interesting, don't demonstrate \
the skills and creativity we're looking to nurture. Please submit your own original \
artwork for this assignment.",
"strengths": ["N/A - AI-generated content detected"],
"areas_for_improvement": ["Submit original artwork created by you",
"Review assignment guidelines for creative direction"],
"learning_objectives_feedback": ["Unable to assess - submission flagged as AI-generated"],
"grade_recommendation": 0,
"encouragement": "We believe in your creative abilities!",
"plagiarism_output": {
"is_plagiarized": False,
"is_ai_generated": True,
"match_type": "ai_generated",
"plagiarism_source": "none",
"similarity_score": 0.0,
"ai_detection_source": ai_source,
"ai_confidence": ai_confidence,
}
}

return response


def _create_plagiarism_feedback( self, plagiarism_data: Dict, assignment_context: Dict) -> Dict:
"""Create feedback for plagiarized submissions"""

match_type = plagiarism_data.get("match_type")
plagiarism_source = plagiarism_data.get("plagiarism_source")
similarity_score = plagiarism_data.get("similarity_score", 0.0)
ai_confidence = plagiarism_data.get("ai_confidence", 0.0)

# respond with structured feedback
response = {
"overall_feedback": f"Your submission has been flagged for similarity \
(similarity: {similarity_score:.0%}, source: {plagiarism_source}).\
Academic integrity is fundamental to the learning process. Please ensure your \
submissions represent your own original work.",
"strengths": ["N/A - Submission flagged for similarity"],
"areas_for_improvement": ["Create original artwork for this assignment",
"Review academic integrity guidelines"],
"learning_objectives_feedback": ["Unable to assess - submission flagged for similarity"],
"grade_recommendation": 0,
"encouragement": "Every artist develops their unique style through practice!",
"plagiarism_output": {
"is_plagiarized": True,
"is_ai_generated": False,
"match_type": match_type,
"plagiarism_source": plagiarism_source,
"similarity_score": similarity_score,
"ai_detection_source": "none",
"ai_confidence": ai_confidence,
}
}

return response




def validate_feedback_structure(self, feedback: Dict, expected_format: Dict) -> Dict:
"""Ensure feedback has all required fields with correct types"""
Expand Down
Loading