diff --git a/app/components/controller/api/routes.py b/app/components/controller/api/routes.py index 3f50b81..04e7610 100644 --- a/app/components/controller/api/routes.py +++ b/app/components/controller/api/routes.py @@ -5,13 +5,15 @@ from http import HTTPStatus from pydantic import ValidationError ## Local -from constants import Defaults, DefaultAPIResponseSchema, Names +from constants import Defaults, Names from logger import logger -from . import service -from .schemas import ( - EditSettingsRequest, - GetSettingsResponse, GetVersionDetailsResponse +from models.api_schemas.controller import ( + GetVersionDetailsResponse, + GetSettingsResponse, EditSettingsRequest, + GetLLMModelsResponse ) +from models.api_schemas.defaults import DefaultAPIResponse +from . import service controller_api = FastAPI() @@ -25,6 +27,21 @@ # ROUTES +@controller_api.get( + "/version", + response_model=GetVersionDetailsResponse, + description=f"Get the {Names.ACE}'s version data" +) +async def get_version_route() -> dict: + try: + return service.get_version_data() + except ValidationError as error: + logger.error(error) + raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="Version data error!") + except Exception as error: + logger.error(error) + raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=Defaults.INTERNAL_SERVER_ERROR_MESSAGE) + @controller_api.get( "/settings", response_model=GetSettingsResponse, @@ -42,13 +59,13 @@ async def get_settings_route() -> dict: @controller_api.post( "/settings", - response_model=DefaultAPIResponseSchema, + response_model=DefaultAPIResponse, description=f"Edit the {Names.ACE} controller settings data" ) async def set_settings_route(updated_settings: EditSettingsRequest) -> dict: try: service.edit_settings_data(updated_settings=updated_settings.model_dump()) - return DefaultAPIResponseSchema(message="Settings data updated successfully!") + return DefaultAPIResponse(message="Settings data updated 
successfully!") except ValidationError as error: logger.error(error) raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="Settings data error!") @@ -57,16 +74,31 @@ async def set_settings_route(updated_settings: EditSettingsRequest) -> dict: raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=Defaults.INTERNAL_SERVER_ERROR_MESSAGE) @controller_api.get( - "/version", - response_model=GetVersionDetailsResponse, - description=f"Get the {Names.ACE}'s version data" + "/model-provider/model-types", + response_model=dict[str, tuple[str, ...]], + description=f"Get the {Names.ACE} available model types" ) -async def get_version_route() -> dict: +async def get_model_types_route() -> dict[str, tuple[str, ...]]: try: - return service.get_version_data() + return service.get_model_types() except ValidationError as error: logger.error(error) - raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="Version data error!") + raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="Model types data error!") + except Exception as error: + logger.error(error) + raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=Defaults.INTERNAL_SERVER_ERROR_MESSAGE) + +@controller_api.get( + "/model-provider/model-type/llm", + response_model=list[GetLLMModelsResponse], + description=f"Get the {Names.ACE} available LLM models" +) +async def get_llm_models_route() -> list[GetLLMModelsResponse]: + try: + return service.get_llm_models() + except ValidationError as error: + logger.error(error) + raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="LLM models data error!") except Exception as error: logger.error(error) raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=Defaults.INTERNAL_SERVER_ERROR_MESSAGE) diff --git a/app/components/controller/api/schemas.py b/app/components/controller/api/schemas.py deleted file mode 100644 index ff6135b..0000000 --- 
a/app/components/controller/api/schemas.py +++ /dev/null @@ -1,26 +0,0 @@ -# DEPENDENCIES -## Third-Party -from pydantic import BaseModel, field_validator -## Local -from constants import Defaults - - -class SettingsSchema(BaseModel): - # These are not required - ace_name: str = Defaults.ACE_NAME - model_provider: str = Defaults.MODEL_PROVIDER - temperature: float = Defaults.TEMPERATURE - - @field_validator("temperature") - def validate_temperature(cls, value): - return min(max(0.0, value), 1.0) - - -# REQUESTS -EditSettingsRequest: type[BaseModel] = SettingsSchema - -# RESPONSES -GetSettingsResponse: type[BaseModel] = SettingsSchema - -class GetVersionDetailsResponse(BaseModel): - version: str diff --git a/app/components/controller/api/service.py b/app/components/controller/api/service.py index 6543bda..ee242ba 100644 --- a/app/components/controller/api/service.py +++ b/app/components/controller/api/service.py @@ -2,8 +2,14 @@ ## Built-In import json ## Local -from constants import DictKeys, Files, ModelProviders -from .schemas import SettingsSchema +from constants import ( + DictKeys, + Files, + ModelProviders, + ModelTypes, ThreeDModelTypes, AudioModelTypes, ImageModelTypes, LLMModelTypes, MultiModalModelTypes, RAGModelTypes, RoboticsModelTypes, VideoModelTypes +) +from models.config.controller import ControllerSettingsSchema +from models.data.initial import INTITAL_LLM_MODEL_PROVIDERS # HELPERS @@ -11,28 +17,43 @@ def _get_settings() -> dict: settings: dict = {} with open(Files.CONTROLLER_SETTINGS, "r", encoding="utf-8") as settings_file: settings = json.loads(settings_file.read()) - settings = SettingsSchema(**settings).model_dump() + settings = ControllerSettingsSchema(**settings).model_dump() with open(Files.CONTROLLER_SETTINGS, "w", encoding="utf-8") as settings_file: settings_file.write(json.dumps(settings)) return settings -# ROUTES +# GENERAL +def get_version_data() -> dict: + with open(Files.VERSION, "r", encoding="utf-8") as settings_file: + return 
json.loads(settings_file.read()) + def get_settings_data() -> dict: return _get_settings() def edit_settings_data(updated_settings: dict): settings: dict = _get_settings() - new_model_provider: str | None = updated_settings.get(DictKeys.MODEL_PROVIDER) - if new_model_provider: - available_model_providers: dict = ModelProviders.get_frozenset() - if new_model_provider not in available_model_providers: - raise ValueError(f"Invalid model provider: {new_model_provider}") settings.update(updated_settings) with open(Files.CONTROLLER_SETTINGS, "w", encoding="utf-8") as settings_file: settings_file.write(json.dumps(settings)) -def get_version_data() -> dict: - with open(Files.VERSION, "r", encoding="utf-8") as settings_file: - return json.loads(settings_file.read()) - \ No newline at end of file +# MODEL PROVIDERS +## Model Types +def get_model_types() -> dict[str, tuple[str, ...]]: + return { + ModelTypes.THREE_D: ThreeDModelTypes.get_tuple(), + ModelTypes.AUDIO: AudioModelTypes.get_tuple(), + ModelTypes.IMAGE: ImageModelTypes.get_tuple(), + ModelTypes.LLM: LLMModelTypes.get_tuple(), + ModelTypes.MULTIMODAL: MultiModalModelTypes.get_tuple(), + ModelTypes.RAG: RAGModelTypes.get_tuple(), + ModelTypes.ROBOTICS: RoboticsModelTypes.get_tuple(), + ModelTypes.VIDEO: VideoModelTypes.get_tuple() + } + +def get_llm_models() -> list[dict]: + llm_model_providers: list[dict] = [initial_llm_model_provider.model_dump() for initial_llm_model_provider in INTITAL_LLM_MODEL_PROVIDERS] + with open(Files.CONTROLLER_LLM_MODELS, "r", encoding="utf-8") as llm_models_file: + llm_models: list = json.loads(llm_models_file.read()) + llm_model_providers.extend(llm_models) + return llm_model_providers diff --git a/app/components/ui/src/app/app.component.ts b/app/components/ui/src/app/app.component.ts index 9ee1c4f..c2dbbeb 100644 --- a/app/components/ui/src/app/app.component.ts +++ b/app/components/ui/src/app/app.component.ts @@ -57,6 +57,11 @@ export class AppComponent implements OnInit { icon: 
"chat", route: "chat" }, + { + name: "Model Garden", + icon: "local_florist", + route: "model-garden" + }, { name: "Settings", icon: "settings", diff --git a/app/components/ui/src/app/app.routes.ts b/app/components/ui/src/app/app.routes.ts index de81ebe..0f8f38a 100644 --- a/app/components/ui/src/app/app.routes.ts +++ b/app/components/ui/src/app/app.routes.ts @@ -2,11 +2,13 @@ import { Routes } from "@angular/router"; import { HomeComponent } from "./pages/home/home.component"; import { DashboardComponent } from "./pages/dashboard/dashboard.component"; import { ChatComponent } from "./pages/chat/chat.component"; +import { ModelGardenComponent } from "./pages/model_garden/model-garden.component"; import { SettingsComponent } from "./pages/settings/settings.component"; export const routes: Routes = [ { path: "", component: HomeComponent }, { path: "dashboard", component: DashboardComponent }, { path: "chat", component: ChatComponent }, + { path: "model-garden", component: ModelGardenComponent }, { path: "settings", component: SettingsComponent }, ]; diff --git a/app/components/ui/src/app/pages/model_garden/model-garden.component.html b/app/components/ui/src/app/pages/model_garden/model-garden.component.html new file mode 100644 index 0000000..16868f3 --- /dev/null +++ b/app/components/ui/src/app/pages/model_garden/model-garden.component.html @@ -0,0 +1 @@ +

MODEL GARDEN PAGE

diff --git a/app/components/ui/src/app/pages/model_garden/model-garden.component.scss b/app/components/ui/src/app/pages/model_garden/model-garden.component.scss new file mode 100644 index 0000000..e69de29 diff --git a/app/components/ui/src/app/pages/model_garden/model-garden.component.ts b/app/components/ui/src/app/pages/model_garden/model-garden.component.ts new file mode 100644 index 0000000..78f400b --- /dev/null +++ b/app/components/ui/src/app/pages/model_garden/model-garden.component.ts @@ -0,0 +1,9 @@ +import { Component } from "@angular/core"; + +@Component({ + selector: "page-model-garden", + imports: [], + templateUrl: "./model-garden.component.html", + styleUrl: "./model-garden.component.scss" +}) +export class ModelGardenComponent {} diff --git a/app/constants/__init__.py b/app/constants/__init__.py index 55f06ae..ac3725b 100644 --- a/app/constants/__init__.py +++ b/app/constants/__init__.py @@ -1,12 +1,14 @@ from .components import Components from .container_folders import ContainerFolders -from .defaults import Defaults, DefaultAPIResponseSchema +from .defaults import Defaults from .dict_keys import DictKeys from .environment_variables import EnvironmentVariables from .folders import Folders # Folders need to be created before files! 
from .files import Files +from .layer_types import LayerTypes from .logger import CustomLogLevels, TERMINAL_COLOR_CODES from .model_providers import ModelProviders +from .model_types import ModelTypes, ThreeDModelTypes, ImageModelTypes, AudioModelTypes, LLMModelTypes, MultiModalModelTypes, RAGModelTypes, RoboticsModelTypes, VideoModelTypes from .names import Names from .network import NetworkPorts from .shell_commands import ShellCommands diff --git a/app/constants/defaults.py b/app/constants/defaults.py index 0cc08fd..3472c9d 100644 --- a/app/constants/defaults.py +++ b/app/constants/defaults.py @@ -1,9 +1,6 @@ # DEPENDENCIES -## Third-Party -from pydantic import BaseModel ## Local from .base_enum import BaseEnum -from .model_providers import ModelProviders class Defaults(BaseEnum): @@ -12,13 +9,9 @@ class Defaults(BaseEnum): # Layers ACE_NAME: str = "PrototypeACE" # Model Provider - MODEL_PROVIDER: str = ModelProviders.OLLAMA - TEMPERATURE: float = 0.2 + CREATIVE_TEMPERATURE: float = 0.7 + LOGICAL_TEMPERATURE: float = 0.2 + OUTPUT_TOKEN_LIMIT: int = 2048 # Logger TERMINAL_COLOR_CODE: str = "\033[0m" # Default color SHUTDOWN_MESSAGE: str = "Shutting down logger..." 
- - -# SCHEMAS -class DefaultAPIResponseSchema(BaseModel): - message: str diff --git a/app/constants/dict_keys.py b/app/constants/dict_keys.py index 43f37ca..7f0fe16 100644 --- a/app/constants/dict_keys.py +++ b/app/constants/dict_keys.py @@ -8,9 +8,11 @@ class DictKeys(BaseEnum): DEV: str = "dev" BUILD: str = "build" FUNCTION_NAME: str = "function_name" + LAYER_SETTINGS: str = "layer_settings" LEVEL: str = "level" MESSAGE: str = "message" MODEL_PROVIDER: str = "model_provider" + MODEL_PROVIDER_SETTINGS: str = "model_provider_settings" PROD: str = "prod" REBUILD_DATE: str = "rebuild_date" RESTART: str = "restart" diff --git a/app/constants/files.py b/app/constants/files.py index 06d45a2..31f23f5 100644 --- a/app/constants/files.py +++ b/app/constants/files.py @@ -23,12 +23,28 @@ class Files(BaseEnum): VERSION: str = "version" # Storage CONTROLLER_SETTINGS: str = f"{Folders.CONTROLLER_STORAGE}.settings" + CONTROLLER_THREE_D_MODELS: str = f"{Folders.CONTROLLER_MODEL_TYPES}.three_d_models" + CONTROLLER_AUDIO_MODELS: str = f"{Folders.CONTROLLER_MODEL_TYPES}.audio_models" + CONTROLLER_IMAGE_MODELS: str = f"{Folders.CONTROLLER_MODEL_TYPES}.image_models" + CONTROLLER_LLM_MODELS: str = f"{Folders.CONTROLLER_MODEL_TYPES}.llm_models" + CONTROLLER_MULTIMODAL_MODELS: str = f"{Folders.CONTROLLER_MODEL_TYPES}.multimodal_models" + CONTROLLER_RAG_MODELS: str = f"{Folders.CONTROLLER_MODEL_TYPES}.rag_models" + CONTROLLER_ROBOTICS_MODELS: str = f"{Folders.CONTROLLER_MODEL_TYPES}.robotics_models" + CONTROLLER_VIDEO_MODELS: str = f"{Folders.CONTROLLER_MODEL_TYPES}.video_models" # INIT _ENSURE_JSON_FILES: frozenset[str] = frozenset([ Files.STARTUP_HISTORY, - Files.CONTROLLER_SETTINGS + Files.CONTROLLER_SETTINGS, + Files.CONTROLLER_THREE_D_MODELS, + Files.CONTROLLER_AUDIO_MODELS, + Files.CONTROLLER_IMAGE_MODELS, + Files.CONTROLLER_LLM_MODELS, + Files.CONTROLLER_MULTIMODAL_MODELS, + Files.CONTROLLER_RAG_MODELS, + Files.CONTROLLER_ROBOTICS_MODELS, + Files.CONTROLLER_VIDEO_MODELS ]) def 
_ensure_json_files(): for file in _ENSURE_JSON_FILES: @@ -78,12 +94,6 @@ def _ensure_json_files(): "{{ controller_host_path }}": Folders.CONTROLLER_STORAGE, "{{ controller_container_path }}": ContainerFolders.CONTROLLER_STORAGE, "{{ controller_volume }}": f"{Names.ACE}_{Components.CONTROLLER}_{Names.VOLUME}", - "{{ layers_host_path }}": Folders.LAYERS_STORAGE, - "{{ layers_container_path }}": ContainerFolders.LAYERS_STORAGE, - "{{ layers_volume }}": f"{Names.ACE}_layers_{Names.VOLUME}", - "{{ model_provider_host_path }}": Folders.MODEL_PROVIDER_STORAGE, - "{{ model_provider_container_path }}": ContainerFolders.MODEL_PROVIDER_STORAGE, - "{{ model_provider_volume }}": f"{Names.ACE}_{Components.MODEL_PROVIDER}_{Names.VOLUME}", "{{ output_host_path }}": Folders.OUTPUT_STORAGE, "{{ output_container_path }}": ContainerFolders.OUTPUT_STORAGE, "{{ output_volume }}": f"{Names.ACE}_output_{Names.VOLUME}" diff --git a/app/constants/folders.py b/app/constants/folders.py index 44f0b72..4247bc0 100644 --- a/app/constants/folders.py +++ b/app/constants/folders.py @@ -16,8 +16,7 @@ class Folders(BaseEnum): STORAGE: str = ".storage/" _HOST_STORAGE: str = f"{os.getcwd()}/{STORAGE}" CONTROLLER_STORAGE: str = f"{_HOST_STORAGE}controller/" - LAYERS_STORAGE: str = f"{_HOST_STORAGE}layers/" - MODEL_PROVIDER_STORAGE: str = f"{_HOST_STORAGE}model_provider/" + CONTROLLER_MODEL_TYPES: str = f"{CONTROLLER_STORAGE}model_types/" OUTPUT_STORAGE: str = f"{_HOST_STORAGE}output/" @@ -26,8 +25,7 @@ class Folders(BaseEnum): Folders.LOGS, Folders.STORAGE, Folders.CONTROLLER_STORAGE, - Folders.LAYERS_STORAGE, - Folders.MODEL_PROVIDER_STORAGE, + Folders.CONTROLLER_MODEL_TYPES, Folders.OUTPUT_STORAGE ) diff --git a/app/constants/layer_types.py b/app/constants/layer_types.py new file mode 100644 index 0000000..8b45a52 --- /dev/null +++ b/app/constants/layer_types.py @@ -0,0 +1,13 @@ +# DEPENDENCIES +## Local +from .base_enum import BaseEnum +from .components import Components + + +class 
LayerTypes(BaseEnum): + ASPIRATIONAL: str = Components.ASPIRATIONAL + GLOBAL_STRATEGY: str = Components.GLOBAL_STRATEGY + AGENT_MODEL: str = Components.AGENT_MODEL + EXECUTIVE_FUNCTION: str = Components.EXECUTIVE_FUNCTION + COGNITIVE_CONTROL: str = Components.COGNITIVE_CONTROL + TASK_PROSECUTION: str = Components.TASK_PROSECUTION diff --git a/app/constants/model_providers.py b/app/constants/model_providers.py index 1b9d9c9..e9c7531 100644 --- a/app/constants/model_providers.py +++ b/app/constants/model_providers.py @@ -5,7 +5,7 @@ class ModelProviders(BaseEnum): CLAUDE: str = "claude" - DEEPSEEK: str = "deepsee" + DEEPSEEK: str = "deepseek" GOOGLE_VERTEX_AI: str = "google_vertex_ai" GROK: str = "grok" GROQ: str = "groq" diff --git a/app/constants/model_types.py b/app/constants/model_types.py new file mode 100644 index 0000000..1edd62f --- /dev/null +++ b/app/constants/model_types.py @@ -0,0 +1,49 @@ +# DEPENDENCIES +## Local +from .base_enum import BaseEnum + + +# BASE +class ModelTypes(BaseEnum): + THREE_D: str = "3d" + AUDIO: str = "audio" + IMAGE: str = "image" + LLM: str = "llm" + MULTIMODAL: str = "multimodal" + RAG: str = "rag" + ROBOTICS: str = "robotics" + VIDEO: str = "video" + + +# INDIVIDUAL +class ThreeDModelTypes(BaseEnum): + THREED_MODEL_GENERATOR: str = "3d_model_generator" + +class AudioModelTypes(BaseEnum): + AUDIO_GENERATOR: str = "audio_generator" + AUDIO_TRANSCRIPTIONIST: str = "audio_transcriptionist" + +class ImageModelTypes(BaseEnum): + IMAGE_GENERATOR: str = "image_generator" + +class LLMModelTypes(BaseEnum): + CODER: str = "coder" + EFFICIENT: str = "efficient" + FUNCTION_CALLER: str = "function_caller" + GENERALIST: str = "generalist" + REASONER: str = "reasoner" + +class MultiModalModelTypes(BaseEnum): + AUDIO_ONLY_MULTIMODAL: str = "audio_only_multimodal" + FULLY_MULTIMODAL: str = "fully_multimodal" + IMAGE_ONLY_MULTIMODAL: str = "image_only_multimodal" + +class RAGModelTypes(BaseEnum): + EMBEDDER: str = "embedder" + RERANKER: str = 
"reranker" + +class RoboticsModelTypes(BaseEnum): + ROBOTICS_CONTROLLER: str = "robotics_controller" + +class VideoModelTypes(BaseEnum): + VIDEO_GENERATOR: str = "video_generator" diff --git a/app/containers/template_deployment.yaml b/app/containers/template_deployment.yaml index 47d0d85..5cb1dc9 100644 --- a/app/containers/template_deployment.yaml +++ b/app/containers/template_deployment.yaml @@ -24,8 +24,6 @@ spec: name: {{ app_volume }} - mountPath: {{ controller_container_path }} name: {{ controller_volume }} - - mountPath: {{ layers_container_path }} - name: {{ layers_volume }} - mountPath: {{ logs_container_path }} name: {{ logs_volume }} - command: @@ -78,8 +76,6 @@ spec: volumeMounts: - mountPath: {{ app_container_path }} name: {{ app_volume }} - - mountPath: {{ model_provider_container_path }} - name: {{ model_provider_volume }} - mountPath: {{ logs_container_path }} name: {{ logs_volume }} - command: @@ -148,8 +144,6 @@ spec: volumeMounts: - mountPath: {{ app_container_path }} name: {{ app_volume }} - - mountPath: {{ layers_container_path }} - name: {{ layers_volume }} - mountPath: {{ logs_container_path }} name: {{ logs_volume }} - command: @@ -167,8 +161,6 @@ spec: volumeMounts: - mountPath: {{ app_container_path }} name: {{ app_volume }} - - mountPath: {{ layers_container_path }} - name: {{ layers_volume }} - mountPath: {{ logs_container_path }} name: {{ logs_volume }} - command: @@ -186,8 +178,6 @@ spec: volumeMounts: - mountPath: {{ app_container_path }} name: {{ app_volume }} - - mountPath: {{ layers_container_path }} - name: {{ layers_volume }} - mountPath: {{ logs_container_path }} name: {{ logs_volume }} - command: @@ -205,8 +195,6 @@ spec: volumeMounts: - mountPath: {{ app_container_path }} name: {{ app_volume }} - - mountPath: {{ layers_container_path }} - name: {{ layers_volume }} - mountPath: {{ logs_container_path }} name: {{ logs_volume }} - command: @@ -224,8 +212,6 @@ spec: volumeMounts: - mountPath: {{ app_container_path }} name: {{ 
app_volume }} - - mountPath: {{ layers_container_path }} - name: {{ layers_volume }} - mountPath: {{ logs_container_path }} name: {{ logs_volume }} - command: @@ -243,8 +229,6 @@ spec: volumeMounts: - mountPath: {{ app_container_path }} name: {{ app_volume }} - - mountPath: {{ layers_container_path }} - name: {{ layers_volume }} - mountPath: {{ output_container_path }} name: {{ output_volume }} - mountPath: {{ logs_container_path }} @@ -262,14 +246,6 @@ spec: path: {{ controller_host_path }} type: Directory name: {{ controller_volume }} - - hostPath: - path: {{ layers_host_path }} - type: Directory - name: {{ layers_volume }} - - hostPath: - path: {{ model_provider_host_path }} - type: Directory - name: {{ model_provider_volume }} - hostPath: path: {{ output_host_path }} type: Directory diff --git a/app/models/__init__.py b/app/models/__init__.py new file mode 100644 index 0000000..511074a --- /dev/null +++ b/app/models/__init__.py @@ -0,0 +1,33 @@ +# DEPENDENCIES +## Local +from logger import logger +from .config.defaults import DEFAULT_LLM_MODEL_TYPE_SETTINGS +from .data.required import REQUIRED_LLM_MODEL_TYPES + + +# TODO: Orchestrate data checks, validation and population here + +def _initialise_database_with_defaults(): + # TODO: Implement this + pass + +def _verify_required_model_types(): + def _verify_required_model_type(category: str, default_settings: list, required_types: frozenset[str]): + verified_types: list[str] = [] + for type_setting in default_settings: + if type_setting.model_type in verified_types: + continue + if type_setting.model_type in required_types: + verified_types.append(type_setting.model_type) + if len(verified_types) != len(required_types): + raise ValueError(f"Missing required {category} types: {required_types - set(verified_types)}") + + _verify_required_model_type( + category="LLM", + default_settings=DEFAULT_LLM_MODEL_TYPE_SETTINGS, + required_types=REQUIRED_LLM_MODEL_TYPES + ) + +def initialise(): + _initialise_database_with_defaults() 
+ _verify_required_model_types() diff --git a/app/models/api_schemas/__init__.py b/app/models/api_schemas/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/models/api_schemas/controller.py b/app/models/api_schemas/controller.py new file mode 100644 index 0000000..4e6004c --- /dev/null +++ b/app/models/api_schemas/controller.py @@ -0,0 +1,19 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel +## Local +from models.config.controller import ControllerSettingsSchema +from models.data.model_providers import LLMModelProvider + + +# REQUESTS +EditSettingsRequest: type[BaseModel] = ControllerSettingsSchema + + +# RESPONSES +class GetVersionDetailsResponse(BaseModel): + version: str + +GetSettingsResponse: type[BaseModel] = ControllerSettingsSchema + +GetLLMModelsResponse: type[BaseModel] = LLMModelProvider diff --git a/app/models/api_schemas/defaults.py b/app/models/api_schemas/defaults.py new file mode 100644 index 0000000..20c01ab --- /dev/null +++ b/app/models/api_schemas/defaults.py @@ -0,0 +1,11 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel + + +# REQUESTS + + +# RESPONSES +class DefaultAPIResponse(BaseModel): + message: str diff --git a/app/models/config/__init__.py b/app/models/config/__init__.py new file mode 100644 index 0000000..2d8045f --- /dev/null +++ b/app/models/config/__init__.py @@ -0,0 +1 @@ +from .model_providers import ModelProviderSettings \ No newline at end of file diff --git a/app/models/config/controller.py b/app/models/config/controller.py new file mode 100644 index 0000000..2845931 --- /dev/null +++ b/app/models/config/controller.py @@ -0,0 +1,14 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel +## Local +from constants import Defaults +from .defaults import DEFAULT_LAYER_SETTINGS, DEFAULT_MODEL_PROVIDER_SETTINGS +from .layers import LayerSettings +from .model_providers import ModelProviderSettings + + +class ControllerSettingsSchema(BaseModel): + ace_name: str = 
Defaults.ACE_NAME + layer_settings: list[LayerSettings] = DEFAULT_LAYER_SETTINGS + model_provider_settings: ModelProviderSettings = DEFAULT_MODEL_PROVIDER_SETTINGS diff --git a/app/models/config/defaults.py b/app/models/config/defaults.py new file mode 100644 index 0000000..071b3a7 --- /dev/null +++ b/app/models/config/defaults.py @@ -0,0 +1,94 @@ +# DEPENDENCIES +## Local +from constants import ( + Defaults, + LayerTypes, + LLMModelTypes +) +from .layers import LayerSettings +from .model_providers import ModelProviderSettings, model_types + + +# LAYERS +DEFAULT_LAYER_SETTINGS: list[LayerSettings] = [ + LayerSettings( + layer_name=LayerTypes.ASPIRATIONAL, + model_type=LLMModelTypes.REASONER + ), + LayerSettings( + layer_name=LayerTypes.GLOBAL_STRATEGY, + model_type=LLMModelTypes.REASONER + ), + LayerSettings( + layer_name=LayerTypes.AGENT_MODEL, + model_type=LLMModelTypes.GENERALIST + ), + LayerSettings( + layer_name=LayerTypes.EXECUTIVE_FUNCTION, + model_type=LLMModelTypes.GENERALIST + ), + LayerSettings( + layer_name=LayerTypes.COGNITIVE_CONTROL, + model_type=LLMModelTypes.EFFICIENT + ), + LayerSettings( + layer_name=LayerTypes.TASK_PROSECUTION, + model_type=LLMModelTypes.FUNCTION_CALLER + ) +] + + +# MODEL PROVIDERS +## Model Types +DEFAULT_LLM_MODEL_TYPE_SETTINGS: list[model_types.LLMModelTypeSetting] = [ + model_types.LLMModelTypeSetting( + model_type=LLMModelTypes.CODER, + model_id="0194c745-de29-7bc6-ad6b-f7b5f8d0e414", + creative_temperature=Defaults.CREATIVE_TEMPERATURE, + logical_temperature=Defaults.LOGICAL_TEMPERATURE, + output_token_limit=Defaults.OUTPUT_TOKEN_LIMIT + ), + model_types.LLMModelTypeSetting( + model_type=LLMModelTypes.EFFICIENT, + model_id="0194c74b-ff7d-7c91-a7b2-934a16aafb04", + creative_temperature=Defaults.CREATIVE_TEMPERATURE, + logical_temperature=Defaults.LOGICAL_TEMPERATURE, + output_token_limit=Defaults.OUTPUT_TOKEN_LIMIT + ), + model_types.LLMModelTypeSetting( + model_type=LLMModelTypes.FUNCTION_CALLER, + 
model_id="0194c751-721a-7470-a277-e76ccdb840b8", + creative_temperature=Defaults.CREATIVE_TEMPERATURE, + logical_temperature=Defaults.LOGICAL_TEMPERATURE, + output_token_limit=Defaults.OUTPUT_TOKEN_LIMIT + ), + model_types.LLMModelTypeSetting( + model_type=LLMModelTypes.GENERALIST, + model_id="0194c751-721a-7470-a277-e76ccdb840b8", + creative_temperature=Defaults.CREATIVE_TEMPERATURE, + logical_temperature=Defaults.LOGICAL_TEMPERATURE, + output_token_limit=Defaults.OUTPUT_TOKEN_LIMIT + ), + model_types.LLMModelTypeSetting( + model_type=LLMModelTypes.REASONER, + model_id="0194c74e-13ea-72d5-9b2d-a46dfa196784", + creative_temperature=Defaults.CREATIVE_TEMPERATURE, + logical_temperature=Defaults.LOGICAL_TEMPERATURE, + output_token_limit=Defaults.OUTPUT_TOKEN_LIMIT + ) +] + +DEFAULT_RAG_MODEL_TYPE_SETTINGS: list[model_types.RAGModelTypeSetting] = [] +# model_types.LLMModelTypeSetting( +# model_type=RAGModelTypes.EMBEDDER, +# model_name="granite-embedding:30m", +# creative_temperature=Defaults.CREATIVE_TEMPERATURE, +# logical_temperature=Defaults.LOGICAL_TEMPERATURE, +# output_token_limit=Defaults.OUTPUT_TOKEN_LIMIT +# ), + +## Overall +DEFAULT_MODEL_PROVIDER_SETTINGS = ModelProviderSettings( + llm_model_type_settings=DEFAULT_LLM_MODEL_TYPE_SETTINGS, + rag_model_type_settings=DEFAULT_RAG_MODEL_TYPE_SETTINGS +) diff --git a/app/models/config/layers.py b/app/models/config/layers.py new file mode 100644 index 0000000..2eb0a33 --- /dev/null +++ b/app/models/config/layers.py @@ -0,0 +1,12 @@ +# DEPENDENCIES +## Built-In +from typing import Literal +## Third-Party +from pydantic import BaseModel +## Local +from constants import LayerTypes, LLMModelTypes + + +class LayerSettings(BaseModel): + layer_name: Literal[*LayerTypes.get_frozenset()] + model_type: Literal[*LLMModelTypes.get_frozenset()] diff --git a/app/models/config/model_providers/__init__.py b/app/models/config/model_providers/__init__.py new file mode 100644 index 0000000..ffc476f --- /dev/null +++ 
b/app/models/config/model_providers/__init__.py @@ -0,0 +1,2 @@ +from .model_providers import ModelProviderSettings +from .individual_providers import IndividualProviderSettings \ No newline at end of file diff --git a/app/models/config/model_providers/individual_providers.py b/app/models/config/model_providers/individual_providers.py new file mode 100644 index 0000000..08b18a0 --- /dev/null +++ b/app/models/config/model_providers/individual_providers.py @@ -0,0 +1,8 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel + + +class IndividualProviderSettings(BaseModel): + enabled: bool = False + api_key: str = "" diff --git a/app/models/config/model_providers/model_providers.py b/app/models/config/model_providers/model_providers.py new file mode 100644 index 0000000..d35fe33 --- /dev/null +++ b/app/models/config/model_providers/model_providers.py @@ -0,0 +1,25 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel +## Local +from .individual_providers import IndividualProviderSettings +from . 
import model_types + + +class ModelProviderSettings(BaseModel): + claude_settings: IndividualProviderSettings = IndividualProviderSettings() + deepseek_settings: IndividualProviderSettings = IndividualProviderSettings() + google_vertex_ai_settings: IndividualProviderSettings = IndividualProviderSettings() + grok_settings: IndividualProviderSettings = IndividualProviderSettings() + groq_settings: IndividualProviderSettings = IndividualProviderSettings() + ollama_settings: IndividualProviderSettings = IndividualProviderSettings(enabled=True) + openai_settings: IndividualProviderSettings = IndividualProviderSettings() + + three_d_model_type_settings: list[model_types.ThreeDModelTypeSetting] = [] + audio_model_type_settings: list[model_types.AudioModelTypeSetting] = [] + image_model_type_settings: list[model_types.ImageModelTypeSetting] = [] + llm_model_type_settings: list[model_types.LLMModelTypeSetting] + multimodal_model_type_settings: list[model_types.MultiModalModelTypeSetting] = [] + rag_model_type_settings: list[model_types.RAGModelTypeSetting] + robotics_model_type_settings: list[model_types.RoboticsModelTypeSetting] = [] + video_model_type_settings: list[model_types.VideoModelTypeSetting] = [] diff --git a/app/models/config/model_providers/model_types/__init__.py b/app/models/config/model_providers/model_types/__init__.py new file mode 100644 index 0000000..99931d4 --- /dev/null +++ b/app/models/config/model_providers/model_types/__init__.py @@ -0,0 +1,8 @@ +from .three_d import ThreeDModelTypeSetting +from .audio import AudioModelTypeSetting +from .image import ImageModelTypeSetting +from .llm import LLMModelTypeSetting +from .multimodal import MultiModalModelTypeSetting +from .rag import RAGModelTypeSetting +from .robotics import RoboticsModelTypeSetting +from .video import VideoModelTypeSetting \ No newline at end of file diff --git a/app/models/config/model_providers/model_types/audio.py b/app/models/config/model_providers/model_types/audio.py new file 
mode 100644 index 0000000..b92802a --- /dev/null +++ b/app/models/config/model_providers/model_types/audio.py @@ -0,0 +1,7 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel + + +class AudioModelTypeSetting(BaseModel): + pass diff --git a/app/models/config/model_providers/model_types/image.py b/app/models/config/model_providers/model_types/image.py new file mode 100644 index 0000000..39a1895 --- /dev/null +++ b/app/models/config/model_providers/model_types/image.py @@ -0,0 +1,7 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel + + +class ImageModelTypeSetting(BaseModel): + pass diff --git a/app/models/config/model_providers/model_types/llm.py b/app/models/config/model_providers/model_types/llm.py new file mode 100644 index 0000000..79b3731 --- /dev/null +++ b/app/models/config/model_providers/model_types/llm.py @@ -0,0 +1,23 @@ +# DEPENDENCIES +## Built-In +from typing import Literal +## Third-Party +from pydantic import BaseModel, field_validator +## Local +from constants import LLMModelTypes + + +class LLMModelTypeSetting(BaseModel): + model_type: Literal[*LLMModelTypes.get_frozenset()] + model_id: str + logical_temperature: float + creative_temperature: float + output_token_limit: int + + @field_validator("logical_temperature") + def validate_logical_temperature(cls, value): + return min(max(0.0, value), 2.0) + + @field_validator("creative_temperature") + def validate_creative_temperature(cls, value): + return min(max(0.0, value), 2.0) diff --git a/app/models/config/model_providers/model_types/multimodal.py b/app/models/config/model_providers/model_types/multimodal.py new file mode 100644 index 0000000..ad243c8 --- /dev/null +++ b/app/models/config/model_providers/model_types/multimodal.py @@ -0,0 +1,7 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel + + +class MultiModalModelTypeSetting(BaseModel): + pass diff --git a/app/models/config/model_providers/model_types/rag.py 
b/app/models/config/model_providers/model_types/rag.py new file mode 100644 index 0000000..ffa26f8 --- /dev/null +++ b/app/models/config/model_providers/model_types/rag.py @@ -0,0 +1,7 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel + + +class RAGModelTypeSetting(BaseModel): + pass diff --git a/app/models/config/model_providers/model_types/robotics.py b/app/models/config/model_providers/model_types/robotics.py new file mode 100644 index 0000000..018dfbc --- /dev/null +++ b/app/models/config/model_providers/model_types/robotics.py @@ -0,0 +1,7 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel + + +class RoboticsModelTypeSetting(BaseModel): + pass diff --git a/app/models/config/model_providers/model_types/three_d.py b/app/models/config/model_providers/model_types/three_d.py new file mode 100644 index 0000000..dfd1627 --- /dev/null +++ b/app/models/config/model_providers/model_types/three_d.py @@ -0,0 +1,7 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel + + +class ThreeDModelTypeSetting(BaseModel): + pass diff --git a/app/models/config/model_providers/model_types/video.py b/app/models/config/model_providers/model_types/video.py new file mode 100644 index 0000000..059cea6 --- /dev/null +++ b/app/models/config/model_providers/model_types/video.py @@ -0,0 +1,7 @@ +# DEPENDENCIES +## Third-Party +from pydantic import BaseModel + + +class VideoModelTypeSetting(BaseModel): + pass diff --git a/app/models/data/__init__.py b/app/models/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/models/data/initial.py b/app/models/data/initial.py new file mode 100644 index 0000000..6215eeb --- /dev/null +++ b/app/models/data/initial.py @@ -0,0 +1,200 @@ +# DEPENDENCIES +## Built-In +from datetime import datetime +## Local +from constants import ModelProviders +from .model_providers import LLMModelProvider + + +# MODEL PROVIDERS +INTITAL_LLM_MODEL_PROVIDERS: list[LLMModelProvider] = [ + # Claude + 
LLMModelProvider( + id="0194c740-6300-7184-8eff-2be664245b03", + model_provider=ModelProviders.CLAUDE, + name="Claude 3.5 Haiku", + model_name="claude-3-5-haiku-latest", + default=True, + max_input_tokens=200000, + max_output_tokens=8192, + cost_per_million_input_tokens=0.8, + cost_per_million_output_tokens=4, + knowledge_cutoff=datetime(2024,7,1), + rate_limits="50 RPM | 50,000 ITPM | 10,000 OTPM" + ), + LLMModelProvider( + id="0194c740-98bb-76db-95ed-ef6cfa62839b", + model_provider=ModelProviders.CLAUDE, + name="Claude 3.5 Sonnet", + model_name="claude-3-5-sonnet-latest", + default=True, + max_input_tokens=200000, + max_output_tokens=8192, + cost_per_million_input_tokens=3, + cost_per_million_output_tokens=15, + knowledge_cutoff=datetime(2024,4,1), + rate_limits="50 RPM | 40,000 ITPM | 8,000 OTPM" + ), + # DeepSeek + LLMModelProvider( + id="0194c740-cb0b-797e-9106-008f0d6b989a", + model_provider=ModelProviders.DEEPSEEK, + name="DeepSeek Chat", + model_name="deepseek-chat", + default=True, + max_input_tokens=64000, + max_output_tokens=8192, + cost_per_million_input_tokens=0.14, + cost_per_million_output_tokens=0.28, + rate_limits="None", + knowledge_cutoff=datetime(2023,10,1) + ), + LLMModelProvider( + id="0194c740-ecc7-78e2-bd5d-e3e5b967bff6", + model_provider=ModelProviders.DEEPSEEK, + name="DeepSeek Reasoner", + model_name="deepseek-reasoner", + default=True, + max_input_tokens=64000, + max_output_tokens=8192, + cost_per_million_input_tokens=0.55, + cost_per_million_output_tokens=2.19, + rate_limits="None", + knowledge_cutoff=datetime(2023,7,1) + ), + # Google Vertex AI + LLMModelProvider( + id="0194c741-1889-7ed9-8149-9c9e52f4be18", + model_provider=ModelProviders.GOOGLE_VERTEX_AI, + name="Gemini 2.0 Flash Exp", + model_name="gemini-2.0-flash-exp", + default=True, + max_input_tokens=1048576, + max_output_tokens=8192, + cost_per_million_input_tokens=0, + cost_per_million_output_tokens=0, + rate_limits="10 RPM | 4 million TPM | 1,500 RPD", + 
knowledge_cutoff=datetime(2024,8,1) + ), + LLMModelProvider( + id="0194c741-40f7-7bcf-968b-ca216c6c0943", + model_provider=ModelProviders.GOOGLE_VERTEX_AI, + name="Gemini 1.5 Flash", + model_name="gemini-1.5-flash", + default=True, + max_input_tokens=1048576, + max_output_tokens=8192, + cost_per_million_input_tokens=0.028125, + cost_per_million_output_tokens=0.11, + rate_limits="Free: 15 RPM | 1 million TPM | 1,500 RPD || Pay-as-you-go: 2,000 RPM | 4 million TPM", + knowledge_cutoff=datetime(2023,11,1) + ), + # Grok + LLMModelProvider( + id="0194c741-6a34-76e5-9442-15acf8623224", + model_provider=ModelProviders.GROK, + name="grok-2", + model_name="grok-2-latest", + default=True, + max_input_tokens=131072, + max_output_tokens=131072, + cost_per_million_input_tokens=2, + cost_per_million_output_tokens=10, + rate_limits="None", + knowledge_cutoff=None + ), + # Groq + LLMModelProvider( + id="0194c741-9423-7561-a795-eff3d8e26856", + model_provider=ModelProviders.GROQ, + name="LLama3 70B", + model_name="llama3-70b-8192", + default=True, + max_input_tokens=8192, + max_output_tokens=8192, + cost_per_million_input_tokens=0.59, + cost_per_million_output_tokens=0.79, + rate_limits="30 RPM | 14,000 RPD | 6,000 TPM | 500,000 TPD", + knowledge_cutoff=datetime(2023,12,1) + ), + LLMModelProvider( + id="0194c741-b4dc-7aba-aaf0-8bd569ff1910", + model_provider=ModelProviders.GROQ, + name="LLama3 8B", + model_name="llama3-8b-8192", + default=True, + max_input_tokens=8192, + max_output_tokens=8192, + cost_per_million_input_tokens=0.05, + cost_per_million_output_tokens=0.08, + rate_limits="30 RPM | 14,000 RPD | 6,000 TPM | 500,000 TPD", + knowledge_cutoff=datetime(2023,3,1) + ), + LLMModelProvider( + id="0194c741-e5b0-770b-9632-5a5029281c72", + model_provider=ModelProviders.GROQ, + name="Mixtral 8x7B", + model_name="mixtral-8x7b-32768", + default=True, + max_input_tokens=32768, + max_output_tokens=32768, + cost_per_million_input_tokens=0.24, + cost_per_million_output_tokens=0.24, + 
rate_limits="30 RPM | 14,000 RPD | 5,000 TPM | 500,000 TPD", + knowledge_cutoff=datetime(2023,12,1) + ), + # OLLAMA + LLMModelProvider( + id="0194c745-de29-7bc6-ad6b-f7b5f8d0e414", + model_provider=ModelProviders.OLLAMA, + name="Qwen 2.5 Coder 3B", + model_name="qwen2.5-coder:3b", + default=True, + max_input_tokens=32768, + max_output_tokens=32768, + cost_per_million_input_tokens=0, + cost_per_million_output_tokens=0, + rate_limits="Not Applicable", + knowledge_cutoff=datetime(2024,9,1) + ), + LLMModelProvider( + id="0194c74b-ff7d-7c91-a7b2-934a16aafb04", + model_provider=ModelProviders.OLLAMA, + name="Dolphin 3.0 LLaMA3 1B", + model_name="nchapman/dolphin3.0-llama3:1b", + default=True, + max_input_tokens=8192, + max_output_tokens=8192, + cost_per_million_input_tokens=0, + cost_per_million_output_tokens=0, + rate_limits="Not Applicable", + knowledge_cutoff=datetime(2023,12,1) + ), + LLMModelProvider( + id="0194c751-721a-7470-a277-e76ccdb840b8", + model_provider=ModelProviders.OLLAMA, + name="Dolphin 3.0 LLaMA3 3B", + model_name="nchapman/dolphin3.0-llama3:3b", + default=True, + max_input_tokens=8192, + max_output_tokens=8192, + cost_per_million_input_tokens=0, + cost_per_million_output_tokens=0, + rate_limits="Not Applicable", + knowledge_cutoff=datetime(2023,12,1) + ), + LLMModelProvider( + id="0194c74e-13ea-72d5-9b2d-a46dfa196784", + model_provider=ModelProviders.OLLAMA, + name="Deepseek R1 1.5B", + model_name="deepseek-r1:1.5b", + default=True, + max_input_tokens=32768, + max_output_tokens=32768, + cost_per_million_input_tokens=0, + cost_per_million_output_tokens=0, + rate_limits="Not Applicable", + knowledge_cutoff=datetime(2023,7,1) + ), + # OpenAI +] \ No newline at end of file diff --git a/app/models/data/model_providers.py b/app/models/data/model_providers.py new file mode 100644 index 0000000..288eeae --- /dev/null +++ b/app/models/data/model_providers.py @@ -0,0 +1,23 @@ +# DEPENDENCIES +## Built-In +from datetime import datetime +from typing import 
Literal +## Third-Party +from pydantic import BaseModel +## Local +from constants import ModelProviders + + +# BASE +class LLMModelProvider(BaseModel): + id: str + model_provider: Literal[*ModelProviders.get_frozenset()] + name: str + model_name: str + default: bool = False + max_input_tokens: int + max_output_tokens: int + cost_per_million_input_tokens: float = 0 + cost_per_million_output_tokens: float = 0 + knowledge_cutoff: datetime | None + rate_limits: str = "Not Available" diff --git a/app/models/data/required.py b/app/models/data/required.py new file mode 100644 index 0000000..83e515f --- /dev/null +++ b/app/models/data/required.py @@ -0,0 +1,22 @@ +# DEPENDENCIES +## Local +from constants import LLMModelTypes, RAGModelTypes + + +# MODEL PROVIDERS +REQUIRED_3D_MODEL_TYPES: frozenset[str] = frozenset() +REQUIRED_AUDIO_MODEL_TYPES: frozenset[str] = frozenset() +REQUIRED_IMAGE_MODEL_TYPES: frozenset[str] = frozenset() +REQUIRED_LLM_MODEL_TYPES: frozenset[str] = frozenset([ + LLMModelTypes.CODER, + LLMModelTypes.EFFICIENT, + LLMModelTypes.FUNCTION_CALLER, + LLMModelTypes.GENERALIST, + LLMModelTypes.REASONER +]) +REQUIRED_MULTIMODAL_MODEL_TYPES: frozenset[str] = frozenset() +REQUIRED_RAG_MODEL_TYPES: frozenset[str] = frozenset([ + RAGModelTypes.EMBEDDER +]) +REQUIRED_ROBOTICS_MODEL_TYPES: frozenset[str] = frozenset() +REQUIRED_VIDEO_MODEL_TYPES: frozenset[str] = frozenset() diff --git a/app/version b/app/version index 9d3bd29..f4db7ab 100644 --- a/app/version +++ b/app/version @@ -1,4 +1,4 @@ { "version": "0.0.0", - "rebuild_date": "2025-02-01" + "rebuild_date": "2025-02-02" } \ No newline at end of file diff --git a/tests/unit/components/controller/api/test_service.py b/tests/unit/components/controller/api/test_service.py index 166ade9..981999d 100644 --- a/tests/unit/components/controller/api/test_service.py +++ b/tests/unit/components/controller/api/test_service.py @@ -5,19 +5,30 @@ ## Third-Party import pytest ## Local -from 
app.components.controller.api.schemas import SettingsSchema +from app.models.config.controller import ControllerSettingsSchema +from app.models.config.layers import LayerSettings +from app.models.config.model_providers import IndividualProviderSettings, ModelProviderSettings from app.components.controller.api.service import ( _get_settings, edit_settings_data ) -from app.constants import Files, DictKeys, ModelProviders +from app.constants import Files, DictKeys, LayerTypes, LLMModelTypes # CONSTANTS class ExistingSettings: ACE_NAME: str = "existing_name" - MODEL_PROVIDER: str = "existing_provider" - TEMPERATURE: float = 0.5 + LAYER_SETTINGS: list[dict] = [ + LayerSettings( + layer_name=LayerTypes.ASPIRATIONAL, + model_type=LLMModelTypes.EFFICIENT + ).model_dump() + ] + MODEL_PROVIDER_SETTINGS = ModelProviderSettings( + claude_settings=IndividualProviderSettings(enabled=True), + llm_model_type_settings = [], + rag_model_type_settings = [] + ).model_dump() # HELPERS @@ -35,10 +46,10 @@ def _existing_settings_file(): """Sets the settings file to an existing dictionary""" with open(Files.CONTROLLER_SETTINGS, 'w') as f: json.dump( - SettingsSchema( + ControllerSettingsSchema( ace_name=ExistingSettings.ACE_NAME, - model_provider=ExistingSettings.MODEL_PROVIDER, - temperature=ExistingSettings.TEMPERATURE + layer_settings=ExistingSettings.LAYER_SETTINGS, + model_provider_settings=ExistingSettings.MODEL_PROVIDER_SETTINGS ).model_dump(), f ) @@ -48,8 +59,8 @@ def _assert_settings_populated(): with open(Files.CONTROLLER_SETTINGS, 'r') as f: settings = json.load(f) assert DictKeys.ACE_NAME in settings, f"Settings file should contain {DictKeys.ACE_NAME}" - assert DictKeys.MODEL_PROVIDER in settings, f"Settings file should contain {DictKeys.MODEL_PROVIDER}" - assert DictKeys.TEMPERATURE in settings, f"Settings file should contain {DictKeys.TEMPERATURE}" + assert DictKeys.LAYER_SETTINGS in settings, f"Settings file should contain {DictKeys.LAYER_SETTINGS}" + assert 
DictKeys.MODEL_PROVIDER_SETTINGS in settings, f"Settings file should contain {DictKeys.MODEL_PROVIDER_SETTINGS}" # TESTS @@ -59,8 +70,8 @@ def test_get_settings_populates_empty_file(): settings: dict = _get_settings() assert isinstance(settings, dict), "Settings should be a dictionary" assert DictKeys.ACE_NAME in settings, f"Settings should contain {DictKeys.ACE_NAME}" - assert DictKeys.MODEL_PROVIDER in settings, f"Settings should contain {DictKeys.MODEL_PROVIDER}" - assert DictKeys.TEMPERATURE in settings, f"Settings should contain {DictKeys.TEMPERATURE}" + assert DictKeys.LAYER_SETTINGS in settings, f"Settings should contain {DictKeys.LAYER_SETTINGS}" + assert DictKeys.MODEL_PROVIDER_SETTINGS in settings, f"Settings should contain {DictKeys.MODEL_PROVIDER_SETTINGS}" _assert_settings_populated() def test_get_settings_does_not_overwrite_existing_values(): @@ -71,33 +82,22 @@ def test_get_settings_does_not_overwrite_existing_values(): with open(Files.CONTROLLER_SETTINGS, 'r') as f: settings: dict = json.load(f) assert settings[DictKeys.ACE_NAME] == ExistingSettings.ACE_NAME, f"{DictKeys.ACE_NAME} should not be overwritten" - assert settings[DictKeys.MODEL_PROVIDER] == ExistingSettings.MODEL_PROVIDER, f"{DictKeys.MODEL_PROVIDER} should not be overwritten" - assert settings[DictKeys.TEMPERATURE] == ExistingSettings.TEMPERATURE, f"{DictKeys.TEMPERATURE} should not be overwritten" + assert settings[DictKeys.LAYER_SETTINGS] == ExistingSettings.LAYER_SETTINGS, f"{DictKeys.LAYER_SETTINGS} should not be overwritten" + assert settings[DictKeys.MODEL_PROVIDER_SETTINGS] == ExistingSettings.MODEL_PROVIDER_SETTINGS, f"{DictKeys.MODEL_PROVIDER_SETTINGS} should not be overwritten" def test_edit_settings_data(): """Test that edit_settings_data updates the settings correctly.""" - class UpdatedSettings: - ACE_NAME: str = "new_ace" - MODEL_PROVIDER: str = ModelProviders.OPENAI - TEMPERATURE: float = 0.7 updated_settings = { - DictKeys.ACE_NAME: UpdatedSettings.ACE_NAME, - 
DictKeys.MODEL_PROVIDER: UpdatedSettings.MODEL_PROVIDER, - DictKeys.TEMPERATURE: UpdatedSettings.TEMPERATURE + DictKeys.ACE_NAME: ExistingSettings.ACE_NAME, + DictKeys.LAYER_SETTINGS: ExistingSettings.LAYER_SETTINGS, + DictKeys.MODEL_PROVIDER_SETTINGS: ExistingSettings.MODEL_PROVIDER_SETTINGS } - _default_settings_file() + _empty_settings_file() edit_settings_data(updated_settings) with open(Files.CONTROLLER_SETTINGS, 'r') as f: new_settings: dict = json.load(f) - assert new_settings[DictKeys.ACE_NAME] == UpdatedSettings.ACE_NAME, f"{DictKeys.ACE_NAME} should be updated" - assert new_settings[DictKeys.MODEL_PROVIDER] == UpdatedSettings.MODEL_PROVIDER, f"{DictKeys.MODEL_PROVIDER} should be updated" - assert new_settings[DictKeys.TEMPERATURE] == UpdatedSettings.TEMPERATURE, f"{DictKeys.TEMPERATURE} should be updated" - -def test_edit_settings_invalid_model_provider(): - """Test that edit_settings_data raises an error for invalid model provider.""" - _default_settings_file() - updated_settings = {DictKeys.MODEL_PROVIDER: "invalid_provider"} - with pytest.raises(ValueError): - edit_settings_data(updated_settings) + assert new_settings[DictKeys.ACE_NAME] == ExistingSettings.ACE_NAME, f"{DictKeys.ACE_NAME} should be updated" + assert new_settings[DictKeys.LAYER_SETTINGS] == ExistingSettings.LAYER_SETTINGS, f"{DictKeys.LAYER_SETTINGS} should be updated" + assert new_settings[DictKeys.MODEL_PROVIDER_SETTINGS] == ExistingSettings.MODEL_PROVIDER_SETTINGS, f"{DictKeys.MODEL_PROVIDER_SETTINGS} should be updated"