18 changes: 16 additions & 2 deletions PosterAgent/parse_raw.py
@@ -1,8 +1,8 @@
from dotenv import load_dotenv
from utils.src.utils import get_json_from_response
from utils.src.model_utils import parse_pdf
import json
import random
import os

from camel.models import ModelFactory
from camel.agents import ChatAgent
@@ -28,7 +28,21 @@
import re
import argparse

load_dotenv()
# Load environment variables from the .env file at the project root
from dotenv import load_dotenv
from pathlib import Path

# Find the project root by looking for files that mark it (requirements.txt and the PosterAgent/ directory)
def find_project_root():
current = Path(__file__).resolve()
for parent in current.parents:
if (parent / 'requirements.txt').exists() and (parent / 'PosterAgent').exists():
return parent
return current.parent

project_root = find_project_root()
dotenv_path = project_root / '.env'
load_dotenv(dotenv_path)
IMAGE_RESOLUTION_SCALE = 5.0

pipeline_options = PdfPipelineOptions()
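Reviewer note: a minimal sketch of what the new loader buys. Because `load_dotenv` now receives an explicit path anchored at the repository root, a single `.env` is picked up no matter which directory the script is launched from. The file contents and values below are placeholders, not values from this PR.

```python
import os
from pathlib import Path
from dotenv import load_dotenv

# Hypothetical .env sitting next to requirements.txt and PosterAgent/:
#
#   AZURE_OPENAI_API_KEY=<your key>
#   AZURE_OPENAI_ENDPOINT=https://my-resource.openai.azure.com
#   AZURE_OPENAI_API_VERSION=2024-02-15-preview
#   AZURE_OPENAI_DEPLOYMENT_NAME=gpt-4o
#
# With the project-root discovery above, this works even when the script is
# run from a subdirectory, e.g. `cd PosterAgent && python parse_raw.py ...`.
project_root = Path(__file__).resolve().parent  # stand-in for find_project_root()
load_dotenv(project_root / ".env")
print(os.environ.get("AZURE_OPENAI_ENDPOINT"))
```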
7 changes: 3 additions & 4 deletions camel/models/azure_openai_model.py
@@ -66,15 +66,15 @@ def __init__(
if model_config_dict is None:
model_config_dict = ChatGPTConfig().as_dict()
api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
url = url or os.environ.get("AZURE_OPENAI_BASE_URL")
url = url or os.environ.get("AZURE_OPENAI_BASE_URL") or os.environ.get("AZURE_OPENAI_ENDPOINT")
super().__init__(
model_type, model_config_dict, api_key, url, token_counter
)

self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
self.api_version = api_version or os.environ.get("AZURE_API_VERSION") or os.environ.get("AZURE_OPENAI_API_VERSION")
self.azure_deployment_name = azure_deployment_name or os.environ.get(
"AZURE_DEPLOYMENT_NAME"
)
) or os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME")
if self.api_version is None:
raise ValueError(
"Must provide either the `api_version` argument "
@@ -88,7 +88,6 @@ def __init__(

self._client = AzureOpenAI(
azure_endpoint=str(self._url),
azure_deployment=self.azure_deployment_name,
api_version=self.api_version,
api_key=self._api_key,
timeout=180,
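Reviewer note: a small, self-contained sketch of the precedence this change introduces. An explicitly passed argument still wins, the original variable names are tried next, and the Azure-SDK-style names are accepted as a last resort. The helper name `resolve` and the sample values are assumptions for illustration only.

```python
import os

def resolve(value, *env_names):
    """Return the explicit value if given, else the first set environment
    variable from env_names (mirrors the `or` chains in the diff above)."""
    if value is not None:
        return value
    for name in env_names:
        found = os.environ.get(name)
        if found is not None:
            return found
    return None

# With only the SDK-style names exported ...
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://example.openai.azure.com"
os.environ["AZURE_OPENAI_API_VERSION"] = "2024-02-15-preview"

# ... the model still resolves its settings:
url = resolve(None, "AZURE_OPENAI_BASE_URL", "AZURE_OPENAI_ENDPOINT")
api_version = resolve(None, "AZURE_API_VERSION", "AZURE_OPENAI_API_VERSION")
print(url, api_version)
```

Worth confirming separately: the client is now built without `azure_deployment`, so the deployment name presumably reaches Azure by some other route not shown in this hunk.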
15 changes: 14 additions & 1 deletion utils/poster_eval_utils.py
@@ -1182,7 +1182,20 @@ def replace_with_caption(match):

def get_questions(paper_text, mode, model_type):
from dotenv import load_dotenv
load_dotenv()
from pathlib import Path

# Find the project root and load the .env file
def find_project_root():
current = Path(__file__).resolve()
for parent in current.parents:
if (parent / 'requirements.txt').exists() and (parent / 'PosterAgent').exists():
return parent
return current.parent

project_root = find_project_root()
dotenv_path = project_root / '.env'
load_dotenv(dotenv_path)

agent_name = f'generate_question_{mode}'
with open(f"utils/prompt_templates/{agent_name}.yaml", "r") as f:
config = yaml.safe_load(f)
104 changes: 78 additions & 26 deletions utils/wei_utils.py
@@ -14,6 +14,7 @@
from PIL import Image
import os
import copy
import json
import io
from utils.src.utils import ppt_to_images
from playwright.sync_api import sync_playwright
@@ -23,12 +24,31 @@
from utils.pptx_utils import *
from utils.critic_utils import *

def get_model_config_with_overrides(config_class):
"""Get model config with custom overrides from MODEL_CONFIG environment variable"""
# Check for custom model configuration in environment variable
model_config_env = os.environ.get('MODEL_CONFIG', '').strip()
if model_config_env:
try:
custom_config = json.loads(model_config_env)
# Use only the custom config when present, don't merge with defaults
return custom_config
except json.JSONDecodeError as e:
print(f"Warning: Failed to parse MODEL_CONFIG environment variable: {e}")
print(f"Using default model configuration.")

# Only use default config if no MODEL_CONFIG is specified
return config_class().as_dict()

def get_agent_config(model_type):
# Check environment variables for platform override
model_platform_env = os.environ.get('MODEL_PLATFORM', '').lower()

agent_config = {}
if model_type == 'qwen':
agent_config = {
"model_type": ModelType.DEEPINFRA_QWEN_2_5_72B,
"model_config": QwenConfig().as_dict(),
"model_config": get_model_config_with_overrides(QwenConfig),
"model_platform": ModelPlatformType.DEEPINFRA,
}
elif model_type == 'gemini':
@@ -101,47 +121,79 @@ def get_agent_config(model_type):
elif model_type == 'o3-mini':
agent_config = {
"model_type": ModelType.O3_MINI,
"model_config": ChatGPTConfig().as_dict(),
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.OPENAI,
}
elif model_type == 'gpt-4.1':
agent_config = {
"model_type": ModelType.GPT_4_1,
"model_config": ChatGPTConfig().as_dict(),
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.OPENAI,
}
elif model_type == 'gpt-4.1-mini':
agent_config = {
"model_type": ModelType.GPT_4_1_MINI,
"model_config": ChatGPTConfig().as_dict(),
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.OPENAI,
}
elif model_type == '4o':
agent_config = {
"model_type": ModelType.GPT_4O,
"model_config": ChatGPTConfig().as_dict(),
"model_platform": ModelPlatformType.OPENAI,
# "model_name": '4o'
}
# Check if Azure platform is requested via environment variable
if model_platform_env == 'azure':
agent_config = {
"model_type": ModelType.GPT_4O,
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.AZURE,
}
else:
agent_config = {
"model_type": ModelType.GPT_4O,
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.OPENAI,
# "model_name": '4o'
}
elif model_type == '4o-mini':
agent_config = {
"model_type": ModelType.GPT_4O_MINI,
"model_config": ChatGPTConfig().as_dict(),
"model_platform": ModelPlatformType.OPENAI,
}
# Check if Azure platform is requested via environment variable
if model_platform_env == 'azure':
agent_config = {
"model_type": ModelType.GPT_4O_MINI,
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.AZURE,
}
else:
agent_config = {
"model_type": ModelType.GPT_4O_MINI,
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.OPENAI,
}
elif model_type == 'o1':
agent_config = {
"model_type": ModelType.O1,
"model_config": ChatGPTConfig().as_dict(),
"model_platform": ModelPlatformType.OPENAI,
# "model_name": 'o1'
}
# Check if Azure platform is requested via environment variable
if model_platform_env == 'azure':
agent_config = {
"model_type": ModelType.O1,
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.AZURE,
}
else:
agent_config = {
"model_type": ModelType.O1,
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.OPENAI,
# "model_name": 'o1'
}
elif model_type == 'o3':
agent_config = {
"model_type": ModelType.O3,
"model_config": ChatGPTConfig().as_dict(),
"model_platform": ModelPlatformType.OPENAI,
}
# Check if Azure platform is requested via environment variable
if model_platform_env == 'azure':
agent_config = {
"model_type": ModelType.O3,
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.AZURE,
}
else:
agent_config = {
"model_type": ModelType.O3,
"model_config": get_model_config_with_overrides(ChatGPTConfig),
"model_platform": ModelPlatformType.OPENAI,
}
elif model_type == 'vllm_qwen_vl':
agent_config = {
"model_type": "Qwen/Qwen2.5-VL-7B-Instruct",
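Reviewer note: a hedged usage sketch summarizing the two new environment hooks in this file. The override keys shown are assumptions about what the underlying CAMEL config accepts, not values taken from the PR.

```python
import json
import os

# 1) MODEL_CONFIG: a JSON object that *replaces* the default config entirely
#    when it parses; on a JSON parse error the code above falls back to the
#    config class defaults.
os.environ["MODEL_CONFIG"] = json.dumps({"temperature": 0.2, "max_tokens": 4096})

# 2) MODEL_PLATFORM: setting it to "azure" reroutes the OpenAI-style model
#    types (4o, 4o-mini, o1, o3) to ModelPlatformType.AZURE instead of OPENAI.
os.environ["MODEL_PLATFORM"] = "azure"

# get_agent_config('4o') would then return an Azure-backed config whose
# model_config is exactly {"temperature": 0.2, "max_tokens": 4096}.
```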