diff --git a/rag_service/core/assignment_context_manager.py b/rag_service/core/assignment_context_manager.py
index c5cc8d7..2e45ccb 100644
--- a/rag_service/core/assignment_context_manager.py
+++ b/rag_service/core/assignment_context_manager.py
@@ -291,28 +291,28 @@ async def refresh_cache(self, assignment_id: str) -> None:
print(f"\nError: {error_msg}")
raise Exception(error_msg)
- def verify_settings(self) -> Dict:
- """Verify RAG Settings configuration"""
- try:
- results = {
- "base_url": bool(self.settings.base_url),
- "api_key": bool(self.settings.api_key),
- "api_secret": bool(self.settings.get_password('api_secret')),
- "endpoints": bool(self.settings.assignment_context_endpoint),
- "cache_config": bool(self.settings.cache_duration_days is not None)
- }
-
- missing = [k for k, v in results.items() if not v]
-
- return {
- "status": "Valid" if not missing else "Invalid",
- "missing_settings": missing,
- "cache_enabled": self.settings.enable_caching,
- "cache_duration": self.settings.cache_duration_days
- }
-
- except Exception as e:
- return {
- "status": "Error",
- "error": str(e)
- }
+ # def verify_settings(self) -> Dict:
+ # """Verify RAG Settings configuration"""
+ # try:
+ # results = {
+ # "base_url": bool(self.settings.base_url),
+ # "api_key": bool(self.settings.api_key),
+ # "api_secret": bool(self.settings.get_password('api_secret')),
+ # "endpoints": bool(self.settings.assignment_context_endpoint),
+ # "cache_config": bool(self.settings.cache_duration_days is not None)
+ # }
+
+ # missing = [k for k, v in results.items() if not v]
+
+ # return {
+ # "status": "Valid" if not missing else "Invalid",
+ # "missing_settings": missing,
+ # "cache_enabled": self.settings.enable_caching,
+ # "cache_duration": self.settings.cache_duration_days
+ # }
+
+ # except Exception as e:
+ # return {
+ # "status": "Error",
+ # "error": str(e)
+ # }
diff --git a/rag_service/core/feedback_processor.py b/rag_service/core/feedback_processor.py
index da459ec..f5cab96 100644
--- a/rag_service/core/feedback_processor.py
+++ b/rag_service/core/feedback_processor.py
@@ -10,56 +10,23 @@ class FeedbackProcessor:
def __init__(self):
self.queue_manager = QueueManager()
- async def process_feedback(self, request_id: str, feedback: Dict) -> None:
+ async def process_feedback(self, request_id: str, feedback: Dict, model_used: str, template_used: str) -> None:
"""Process and store feedback in Feedback Request DocType"""
try:
print(f"\n=== Processing Feedback for Request: {request_id} ===")
-
+
# Get the feedback request document
feedback_request = frappe.get_doc("Feedback Request", request_id)
print(f"Found Feedback Request: {feedback_request.name}")
-
- # Format feedback for display
- formatted_feedback = self.format_feedback_for_display(feedback)
-
+
print("\nUpdating Feedback Request fields...")
# Update document fields using db_set
feedback_request.db_set('status', 'Completed', update_modified=True)
feedback_request.db_set('generated_feedback', json.dumps(feedback, indent=2), update_modified=True)
- feedback_request.db_set('feedback_summary', formatted_feedback, update_modified=True)
+ feedback_request.db_set('feedback_summary', feedback['overall_feedback'], update_modified=True)
feedback_request.db_set('completed_at', datetime.now(), update_modified=True)
-
- # Get and set LLM model info
- llm_settings = frappe.get_list(
- "LLM Settings",
- filters={"is_active": 1},
- limit=1
- )
- if llm_settings:
- feedback_request.db_set('model_used', llm_settings[0].name, update_modified=True)
-
- # FIXED: Get universal template (no assignment_type filtering)
- print("Getting universal template...")
- try:
- # Get any active template (same logic as langchain_manager.py)
- templates = frappe.get_list(
- "Prompt Template",
- filters={"is_active": 1}, # Only filter by active status
- order_by="version desc",
- limit=1
- )
-
- if templates:
- feedback_request.db_set('template_used', templates[0].name, update_modified=True)
- print(f"Using universal template: {templates[0].name}")
- else:
- print("No active template found - leaving template_used empty")
- feedback_request.db_set('template_used', '', update_modified=True)
-
- except Exception as template_error:
- print(f"Error getting template: {str(template_error)}")
- # Don't fail the entire process for template tracking issues
- feedback_request.db_set('template_used', '', update_modified=True)
+ feedback_request.db_set('model_used', model_used, update_modified=True)
+ feedback_request.db_set('template_used', template_used, update_modified=True)
# Commit changes
frappe.db.commit()
@@ -70,6 +37,7 @@ async def process_feedback(self, request_id: str, feedback: Dict) -> None:
print(f"Status: {updated_doc.status}")
print(f"Has Generated Feedback: {bool(updated_doc.generated_feedback)}")
print(f"Has Feedback Summary: {bool(updated_doc.feedback_summary)}")
+ print(f"Model Used: {updated_doc.model_used}")
print(f"Template Used: {updated_doc.template_used}")
# Prepare and send message to TAP LMS
@@ -78,10 +46,19 @@ async def process_feedback(self, request_id: str, feedback: Dict) -> None:
"student_id": feedback_request.student_id,
"assignment_id": feedback_request.assignment_id,
"feedback": feedback,
- "summary": formatted_feedback,
+ "summary": feedback['overall_feedback'],
+
+                "is_plagiarized": feedback.get('plagiarism_output', {}).get('is_plagiarized', False),
+                "is_ai_generated": feedback.get('plagiarism_output', {}).get('is_ai_generated', False),
+                "match_type": feedback.get('plagiarism_output', {}).get('match_type', 'original'),
+                "plagiarism_source": feedback.get('plagiarism_output', {}).get('plagiarism_source', 'none'),
+                "similarity_score": feedback.get('plagiarism_output', {}).get('similarity_score', 0.0),
+                "ai_detection_source": feedback.get('plagiarism_output', {}).get('ai_detection_source', 'none'),
+                "ai_confidence": feedback.get('plagiarism_output', {}).get('ai_confidence', 0.0),
+
"generated_at": feedback_request.completed_at.isoformat() if feedback_request.completed_at else datetime.now().isoformat(),
- "plagiarism_score": feedback_request.plagiarism_score,
- "similar_sources": json.loads(feedback_request.similar_sources or '[]')
+ # "plagiarism_score": feedback_request.plagiarism_score,
+ # "similar_sources": json.loads(feedback_request.similar_sources or '[]')
}
# Send to TAP LMS queue
diff --git a/rag_service/core/langchain_manager.py b/rag_service/core/langchain_manager.py
index 29b719d..c510afe 100644
--- a/rag_service/core/langchain_manager.py
+++ b/rag_service/core/langchain_manager.py
@@ -25,10 +25,12 @@ def setup_llm(self):
raise Exception("No active LLM configuration found")
settings = frappe.get_doc("LLM Settings", llm_settings[0].name)
+ self.model_used = llm_settings[0].name
print("\nUsing LLM Settings:")
print(f"Provider: {settings.provider}")
print(f"Model: {settings.model_name}")
+
# Create LLM provider based on settings
self.llm_provider = create_llm_provider(
provider=settings.provider,
@@ -121,27 +123,27 @@ def __init__(self):
self.template_name = "Built-in Universal Template"
self.system_prompt = """You are an expert educational feedback assistant that provides constructive, age-appropriate feedback on student submissions across all subjects and assignment types. You adapt your evaluation criteria and language based on the assignment context provided.
-CRITICAL: You must ALWAYS respond with valid JSON, never plain text."""
+ CRITICAL: You must ALWAYS respond with valid JSON, never plain text."""
self.user_prompt = """Assignment Context:
-Assignment Name: {assignment_name}
-Subject Area: {course_vertical}
-Assignment Type: {assignment_type}
-Description: {assignment_description}
+ Assignment Name: {assignment_name}
+ Subject Area: {course_vertical}
+ Assignment Type: {assignment_type}
+ Description: {assignment_description}
-Learning Objectives:
-{learning_objectives}
+ Learning Objectives:
+ {learning_objectives}
-Please analyze this student submission and provide feedback in the required JSON format."""
+ Please analyze this student submission and provide feedback in the required JSON format."""
self.response_format = """{
- "overall_feedback": "Comprehensive feedback about the submission",
- "strengths": ["Specific strength 1", "Specific strength 2", "Specific strength 3"],
- "areas_for_improvement": ["Improvement area 1", "Improvement area 2"],
- "learning_objectives_feedback": ["Feedback on learning objective 1"],
- "grade_recommendation": 85,
- "encouragement": "Encouraging message for the student"
-}"""
+ "overall_feedback": "Comprehensive feedback about the submission",
+ "strengths": ["Specific strength 1", "Specific strength 2", "Specific strength 3"],
+ "areas_for_improvement": ["Improvement area 1", "Improvement area 2"],
+ "learning_objectives_feedback": ["Feedback on learning objective 1"],
+ "grade_recommendation": 85,
+ "encouragement": "Encouraging message for the student"
+ }"""
return BuiltinTemplate()
@@ -171,7 +173,7 @@ def get_default_response_format(self) -> Dict:
"encouragement": "Encouraging message for the student"
}
- async def generate_feedback(self, assignment_context: Dict, submission_url: str, submission_id: str) -> Dict:
+    async def generate_feedback_universal(self, assignment_context: Dict, submission_url: str, submission_id: str) -> tuple:
"""Generate feedback using universal template approach"""
try:
print("\n=== Starting Universal Feedback Generation ===")
@@ -251,8 +253,32 @@ async def generate_feedback(self, assignment_context: Dict, submission_url: str,
# Create structured fallback response
feedback = self.create_fallback_feedback(assignment_context, expected_format)
+ # Attach default plagiarism/AI-detection metadata
+ plagiarism_output = {
+ "is_plagiarized": False,
+ "is_ai_generated": False,
+ "match_type": "original",
+ "plagiarism_source": "none",
+ "similarity_score": 0.0,
+ "ai_detection_source": "none",
+ "ai_confidence": 0.0,
+ "similar_sources": []
+ }
+ feedback["plagiarism_output"] = plagiarism_output
+
+ try:
+ if hasattr(template, 'name'):
+ template_used = template.name
+ else:
+ template_used = "Built-in Universal Template"
+
+ except Exception as template_error:
+ print("Used Default Template:")
+ template_used = "Built-in Universal Template"
+ # Don't fail the entire process for template tracking issues
+
print("\n=== Feedback Generation Completed Successfully ===")
- return feedback
+ return feedback, template_used
except Exception as e:
error_msg = f"Error generating feedback for submission {submission_id}: {str(e)}"
@@ -260,7 +286,142 @@ async def generate_feedback(self, assignment_context: Dict, submission_url: str,
frappe.log_error(message=error_msg, title="Feedback Generation Error")
# Return structured error response
- return self.create_error_feedback(assignment_context)
+ template_used = "Built-in Universal Template for Error"
+ return self.create_error_feedback(assignment_context), template_used
+
+
+
+
+
+    async def generate_feedback(self, assignment_context: Dict, submission_url: str, submission_id: str,
+                                plagiarism_data: Dict = None, feedback_request_id: str = None) -> tuple:
+ """Generate feedback with plagiarism context"""
+
+ result_status = "Pending"
+
+ try:
+ # Check for plagiarism/AI-generated content first
+            plagiarism_data = plagiarism_data or {}
+            is_plagiarized = plagiarism_data.get("is_plagiarized", False)
+            is_ai_generated = plagiarism_data.get("is_ai_generated", False)
+            match_type = plagiarism_data.get("match_type", "original")
+            plagiarism_source = plagiarism_data.get("plagiarism_source", "none")
+
+            # Handle AI-generated submissions
+            if is_ai_generated:
+                result_status = "Success - Flagged"
+                feedback = self._create_ai_generated_feedback(
+                    plagiarism_data,
+                    assignment_context
+                )
+                template_used = "Feedback Template for AI Generated Submission"
+
+            # Handle plagiarized submissions
+            elif is_plagiarized and match_type in ["exact_duplicate", "near_duplicate"]:
+                result_status = "Success - Flagged"
+                feedback = self._create_plagiarism_feedback(
+                    plagiarism_data,
+                    assignment_context
+                )
+                template_used = "Feedback Template for Plagiarized Submission"
+
+            # Continue with normal feedback generation for original work
+            else:
+                result_status = "Success - Original"
+                feedback, template_used = await self.generate_feedback_universal(assignment_context, submission_url, submission_id)
+
+            await self._update_result_status(feedback_request_id, result_status)
+            return feedback, self.model_used, template_used
+
+ except Exception as e:
+ result_status = "Failed"
+ await self._update_result_status(feedback_request_id, result_status, str(e))
+ raise
+
+ async def _update_result_status(self, feedback_request_id: str, status: str, error_message: str = None):
+ """Update Feedback Request result_status"""
+ if not feedback_request_id:
+ return
+
+ update_data = {"result_status": status}
+ if error_message:
+ update_data["error_message"] = error_message[:500] # Truncate long errors
+
+ frappe.db.set_value(
+ "Feedback Request",
+ feedback_request_id,
+ update_data,
+ update_modified=True
+ )
+ frappe.db.commit()
+
+ def _create_ai_generated_feedback(self, plagiarism_data: Dict, assignment_context: Dict) -> Dict:
+ """Create feedback for AI-generated submissions"""
+
+ ai_source = plagiarism_data.get("ai_detection_source", "unknown")
+ ai_confidence = plagiarism_data.get("ai_confidence", 0.0)
+        response = {
+            "overall_feedback": (f"Your submission appears to be generated by an "
+                f"AI tool (detected source: {ai_source}, confidence: {ai_confidence:.0%}). "
+                "At MentorMe, we encourage original creative work that reflects your own learning "
+                "and artistic development. AI-generated images, while interesting, don't demonstrate "
+                "the skills and creativity we're looking to nurture. Please submit your own original "
+                "artwork for this assignment."),
+ "strengths": ["N/A - AI-generated content detected"],
+ "areas_for_improvement": ["Submit original artwork created by you",
+ "Review assignment guidelines for creative direction"],
+ "learning_objectives_feedback": ["Unable to assess - submission flagged as AI-generated"],
+ "grade_recommendation": 0,
+ "encouragement": "We believe in your creative abilities!",
+ "plagiarism_output": {
+ "is_plagiarized": False,
+ "is_ai_generated": True,
+ "match_type": "ai_generated",
+ "plagiarism_source": "none",
+ "similarity_score": 0.0,
+ "ai_detection_source": ai_source,
+ "ai_confidence": ai_confidence,
+ }
+ }
+
+ return response
+
+
+ def _create_plagiarism_feedback( self, plagiarism_data: Dict, assignment_context: Dict) -> Dict:
+ """Create feedback for plagiarized submissions"""
+
+ match_type = plagiarism_data.get("match_type")
+ plagiarism_source = plagiarism_data.get("plagiarism_source")
+ similarity_score = plagiarism_data.get("similarity_score", 0.0)
+ ai_confidence = plagiarism_data.get("ai_confidence", 0.0)
+
+ # respond with structured feedback
+        response = {
+            "overall_feedback": (f"Your submission has been flagged for similarity "
+                f"(similarity: {similarity_score:.0%}, source: {plagiarism_source}). "
+                "Academic integrity is fundamental to the learning process. Please ensure your "
+                "submissions represent your own original work."),
+ "strengths": ["N/A - Submission flagged for similarity"],
+ "areas_for_improvement": ["Create original artwork for this assignment",
+ "Review academic integrity guidelines"],
+ "learning_objectives_feedback": ["Unable to assess - submission flagged for similarity"],
+ "grade_recommendation": 0,
+ "encouragement": "Every artist develops their unique style through practice!",
+ "plagiarism_output": {
+ "is_plagiarized": True,
+ "is_ai_generated": False,
+ "match_type": match_type,
+ "plagiarism_source": plagiarism_source,
+ "similarity_score": similarity_score,
+ "ai_detection_source": "none",
+ "ai_confidence": ai_confidence,
+ }
+ }
+
+ return response
+
+
+
def validate_feedback_structure(self, feedback: Dict, expected_format: Dict) -> Dict:
"""Ensure feedback has all required fields with correct types"""
diff --git a/rag_service/handlers/feedback_handler.py b/rag_service/handlers/feedback_handler.py
index b4b2e79..dc75f64 100644
--- a/rag_service/handlers/feedback_handler.py
+++ b/rag_service/handlers/feedback_handler.py
@@ -22,6 +22,23 @@ async def handle_submission(self, message_data: Dict) -> None:
try:
print("\n=== Processing New Submission ===")
print(f"Submission ID: {message_data.get('submission_id')}")
+
+
+ submission_id = message_data.get("submission_id")
+ is_plagiarized = message_data.get("is_plagiarized", False)
+ match_type = message_data.get("match_type", "original")
+ similarity_score = message_data.get("similarity_score", 0.0)
+ plagiarism_source = message_data.get("plagiarism_source", "none")
+ similar_sources = message_data.get("similar_sources", [])
+ is_ai_generated = message_data.get("is_ai_generated", False)
+ ai_detection_source = message_data.get("ai_detection_source", "unknown")
+ ai_confidence = message_data.get("ai_confidence", 0.0)
+
+        print(f"\nPlagiarism Check - ID: {submission_id}, "
+              f"Plagiarized: {is_plagiarized}, Match Type: {match_type}, "
+              f"Similarity Score: {similarity_score}, Source: {plagiarism_source}, "
+              f"Similar Sources: {similar_sources}, "
+              f"AI Generated: {is_ai_generated}, AI Confidence: {ai_confidence}")
# Create or update feedback request
request_id = await self.create_feedback_request(message_data)
@@ -38,15 +55,17 @@ async def handle_submission(self, message_data: Dict) -> None:
print("\nGenerating feedback...")
# Generate feedback
- feedback = await self.langchain_manager.generate_feedback(
+ feedback, model_used, template_used = await self.langchain_manager.generate_feedback(
assignment_context=assignment_context,
submission_url=message_data["img_url"],
- submission_id=request_id
+ submission_id=request_id,
+ plagiarism_data=message_data,
+ feedback_request_id=request_id
)
print("\nFeedback generated, processing feedback...")
# Process and deliver feedback
- await self.feedback_processor.process_feedback(request_id, feedback)
+ await self.feedback_processor.process_feedback(request_id, feedback, model_used, template_used)
print("\nFeedback processing completed")
except Exception as e:
@@ -92,7 +111,13 @@ async def create_feedback_request(self, message_data: Dict) -> str:
"assignment_id": message_data["assignment_id"],
"submission_content": message_data["img_url"],
"plagiarism_score": message_data.get("plagiarism_score", 0.0),
+ "is_plagiarized": message_data.get("is_plagiarized", False),
+ "plagiarism_source": message_data.get("plagiarism_source", "none"),
+ "match_type": message_data.get("match_type", "original"),
+ "is_ai_generated": message_data.get("is_ai_generated", False),
+ "ai_confidence": message_data.get("ai_confidence", 0.0),
"similar_sources": json.dumps(message_data.get("similar_sources", [])),
+ "ai_detection_source": message_data.get("ai_detection_source", "unknown"),
"status": "Processing",
"created_at": datetime.now(),
"processing_attempts": 1
diff --git a/rag_service/rag_service/doctype/feedback_request/feedback_request.js b/rag_service/rag_service/doctype/feedback_request/feedback_request.js
index 2def124..4724608 100644
--- a/rag_service/rag_service/doctype/feedback_request/feedback_request.js
+++ b/rag_service/rag_service/doctype/feedback_request/feedback_request.js
@@ -1,8 +1,27 @@
-// Copyright (c) 2024, TAP and contributors
-// For license information, please see license.txt
+frappe.listview_settings['Feedback Request'] = {
+ add_fields: ["result_status", "is_plagiarized", "is_ai_generated"],
-// frappe.ui.form.on("Feedback Request", {
-// refresh(frm) {
+ get_indicator: function(doc) {
+ const status_map = {
+ "Pending": ["orange", "Pending"],
+ "Success - Original": ["green", "Original"],
+ "Success - Flagged": ["red", "Flagged"],
+ "Failed": ["darkgrey", "Failed"]
+ };
-// },
-// });
+ const [color, label] = status_map[doc.result_status] || ["grey", "Unknown"];
+ return [__(label), color, `result_status,=,${doc.result_status}`];
+ },
+
+ formatters: {
+ result_status: function(value) {
+ const badges = {
+ "Pending": 'Pending',
+ "Success - Original": '✓ Original',
+ "Success - Flagged": '⚠ Flagged',
+ "Failed": '✗ Failed'
+ };
+ return badges[value] || value;
+ }
+ }
+};
diff --git a/rag_service/rag_service/doctype/feedback_request/feedback_request.json b/rag_service/rag_service/doctype/feedback_request/feedback_request.json
index 12aef79..2805452 100644
--- a/rag_service/rag_service/doctype/feedback_request/feedback_request.json
+++ b/rag_service/rag_service/doctype/feedback_request/feedback_request.json
@@ -21,7 +21,14 @@
"model_used",
"processing_attempts",
"error_log",
- "is_archived"
+ "is_archived",
+ "result_status",
+ "is_plagiarized",
+ "match_type",
+ "plagiarism_source",
+ "is_ai_generated",
+ "ai_detection_source",
+ "ai_confidence"
],
"fields": [
{
@@ -82,9 +89,8 @@
},
{
"fieldname": "template_used",
- "fieldtype": "Link",
- "label": "Template Used",
- "options": "Prompt Template"
+ "fieldtype": "Text",
+ "label": "Template Used"
},
{
"fieldname": "model_used",
@@ -107,6 +113,48 @@
"fieldname": "is_archived",
"fieldtype": "Check",
"label": "Is Archived"
+ },
+ {
+ "fieldname": "result_status",
+ "fieldtype": "Select",
+ "label": "Result Status",
+ "options": "Pending\nSuccess - Original\nSuccess - Flagged\nFailed",
+ "default": "Pending",
+ "in_list_view": 1,
+ "in_standard_filter": 1,
+ "description": "Overall feedback generation result status"
+ },
+ {
+ "fieldname": "is_plagiarized",
+ "fieldtype": "Check",
+ "label": "Is Plagiarized"
+ },
+ {
+ "fieldname": "match_type",
+ "fieldtype": "Select",
+ "label": "Match Type",
+ "options": "\noriginal\nexact_duplicate\nnear_duplicate\nsemantic_match\nai_generated\nresubmission_allowed"
+ },
+ {
+ "fieldname": "plagiarism_source",
+ "fieldtype": "Select",
+ "label": "Plagiarism Source",
+ "options": "\nnone\npeer\npeer_collusion\nself_cross_assignment\nself_late_resubmission\nreference\nai_generated"
+ },
+ {
+ "fieldname": "is_ai_generated",
+ "fieldtype": "Check",
+ "label": "Is AI Generated"
+ },
+ {
+ "fieldname": "ai_detection_source",
+ "fieldtype": "Data",
+ "label": "AI Detection Source"
+ },
+ {
+ "fieldname": "ai_confidence",
+ "fieldtype": "Float",
+ "label": "AI Confidence"
}
],
"index_web_pages_for_search": 1,
diff --git a/rag_service/scripts/consumer.py b/rag_service/scripts/consumer.py
new file mode 100644
index 0000000..5a57e70
--- /dev/null
+++ b/rag_service/scripts/consumer.py
@@ -0,0 +1,6 @@
+# Consumer code for testing in bench console
+
+from rag_service.utils.rabbitmq_consumer import RabbitMQConsumer
+consumer = RabbitMQConsumer(debug=True)
+if consumer.test_connection():
+ consumer.start_consuming()
\ No newline at end of file
diff --git a/rag_service/scripts/prompt_templat_change.json b/rag_service/scripts/prompt_templat_change.json
new file mode 100644
index 0000000..7ade36d
--- /dev/null
+++ b/rag_service/scripts/prompt_templat_change.json
@@ -0,0 +1 @@
+{"name":"ah5ptrjtso","owner":"Administrator","creation":"2025-12-18 13:53:46.553373","modified":"2026-01-01 14:42:47.066773","modified_by":"Administrator","docstatus":0,"idx":0,"template_name":"Visual Art Practical","assignment_type":"Practical","system_prompt":"You are an encouraging, knowledgeable educational assistant that provides constructive feedback on student submissions across all subjects and assignment types.\\n\\nEVALUATION GUIDELINES: Adapt your feedback style based on the assignment context:\\n- For Art/Creative assignments: Focus on creativity, artistic expression, visual elements, color, composition, and form\\n- For Programming/Coding assignments: Focus on logic, problem-solving, code structure, and computational thinking \\n- For Math assignments: Focus on mathematical reasoning, problem-solving steps, and correct procedures\\n- For Science assignments: Focus on scientific understanding, observations, and methodology\\n- For Writing assignments: Focus on clarity, organization, vocabulary, and expression\\n- For other subjects: Focus on understanding of concepts, quality of work, and learning demonstration\\n\\nAlways provide feedback that is:\\n- Encouraging and positive while being constructive\\n- Age-appropriate for the student (use simple, clear language)\\n- Under 70 words for overall feedback\\n- Specific to what you observe in the submission\\n- Motivating and confidence-building\\n\\nBegin with positive observations about what the student did well, then provide gentle suggestions for improvement, and end with encouragement to keep learning and improving.\\n\\nCRITICAL: You must ALWAYS respond with valid JSON format, never plain text.\n","user_prompt":"Assignment Context:\\nAssignment Name: {assignment_name}\\nSubject Area: {course_vertical}\\nAssignment Type: {assignment_type}\\nDescription: {assignment_description}\\n\\nLearning Objectives:\\n{learning_objectives}\\n\\nPlease analyze this student submission based on the assignment 
context above.\\n\\nIMPORTANT: You must respond with this exact JSON format regardless of image content:\\n\\n{\\n \\\"overall_feedback\\\": \\\"If image matches assignment: Provide encouraging, constructive feedback (30-50 words, age-appropriate). If image does NOT match assignment: 'Something went wrong—It looks like your submission is incorrect! I am not able to provide feedback for your submission.'\\\",\\n \\\"strengths\\\": [\\n \\\"If image matches assignment: specific strength with example\\\",\\n \\\"If image matches assignment: specific strength with example\\\"],\\n \\\"areas_for_improvement\\\": [\\n \\\"If image matches assignment: constructive suggestion with actionable advice\\\",\\n \\\"If image matches assignment: constructive suggestion with actionable advice\\\"\\n ],\\n \\\"learning_objectives_feedback\\\": [\\n \\\"If image matches assignment: analysis of learning objective achievement\\\"\\n ],\\n \\\"grade_recommendation\\\": \\\"If image matches assignment: numerical score 0-100, If does NOT match: 0\\\",\\n \\\"encouragement\\\": \\\"If image matches assignment: encouraging statement based on work, If does NOT match: 'Please try resubmitting with content that matches the assignment requirements'\\\"\\n}\n","is_active":1,"version":0,"last_used":"2026-01-01 09:12:47.066764","doctype":"Prompt Template","variables":[],"__last_sync_on":"2026-01-01T12:47:24.301Z"}
\ No newline at end of file
diff --git a/rag_service/scripts/req.txt b/rag_service/scripts/req.txt
new file mode 100644
index 0000000..3eed3dd
--- /dev/null
+++ b/rag_service/scripts/req.txt
@@ -0,0 +1 @@
+pip install langchain==0.0.354 langchain-core==0.1.23 langchain-community==0.0.20 langchain-openai
\ No newline at end of file