From 1a200ee178c73db2ac87820fce1b154c262f35b1 Mon Sep 17 00:00:00 2001 From: Abhaas Goyal Date: Wed, 2 Apr 2025 06:55:16 +1100 Subject: [PATCH] Implement endpoints for model output creation and query Add API Spec v2.1.0 --- .github/workflows/ci.yml | 3 + docs/cli.md | 33 +++ meorg_client/cli.py | 89 ++++++- meorg_client/client.py | 84 ++++-- meorg_client/constants.py | 2 + meorg_client/data/openapi.json | 427 +++++++++++++++++++++++++++++- meorg_client/endpoints.py | 4 + meorg_client/tests/conftest.py | 40 +++ meorg_client/tests/test_cli.py | 26 ++ meorg_client/tests/test_client.py | 24 ++ meorg_client/utilities.py | 4 + 11 files changed, 712 insertions(+), 24 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f9d0e3c..ef9c880 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,6 +34,9 @@ jobs: MEORG_EMAIL: ${{ secrets.MEORG_EMAIL }} MEORG_PASSWORD: ${{ secrets.MEORG_PASSWORD }} MEORG_MODEL_OUTPUT_ID: ${{ secrets.MEORG_MODEL_OUTPUT_ID }} + MEORG_MODEL_OUTPUT_NAME: ${{ secrets.MEORG_MODEL_OUTPUT_NAME}} + MEORG_MODEL_PROFILE_ID: ${{ secrets.MEORG_MODEL_PROFILE_ID }} + MEORG_EXPERIMENT_ID: ${{ secrets.MEORG_EXPERIMENT_ID }} run: | conda install pytest pytest -v \ No newline at end of file diff --git a/docs/cli.md b/docs/cli.md index 2690605..db6dd21 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -93,6 +93,39 @@ modelevaluation.org/modelOutput/display/**kafS53HgWu2CDXxgC** This command will return an `$ANALYSIS_ID` upon success which is used in `analysis status`. +### model output create + +To create a model output, execute the following command: + +```shell +meorg output create $MODEL_PROFILE_ID $EXPERIMENT_ID $MODEL_OUTPUT_NAME +``` + +Where `$MODEL_PROFILE_ID` and `$EXPERIMENT_ID` are found on the model profile and corresponding experiment details pages on modelevaluation.org. `$MODEL_OUTPUT_NAME` is a unique name for the newly created model output. + +This command will return the newly created `$MODEL_OUTPUT_ID` upon success which is used for further analysis. It will also print whether an existing model output record was overwritten. + +### model output query + +Retrieve Model output details via `$MODEL_OUTPUT_ID` + +```shell +meorg output query $MODEL_OUTPUT_ID +``` + +This command will print the `id` and `name` of the modeloutput. If developer mode is enabled, print the JSON representation for the model output with metadata. An example model output data response would be: + +```json +{ + "id": "MnCj3tMzGx3NsuzwS", + "name": "temp-output", + "created": "2025-04-04T00:09:44.258Z", + "modified": "2025-04-17T05:12:08.135Z", + "stateSelection": "default model initialisation", + "benchmarks": [] +} +``` + ### analysis status To query the status of an analysis, execute the following command: diff --git a/meorg_client/cli.py b/meorg_client/cli.py index 4ca2f49..0576fbf 100644 --- a/meorg_client/cli.py +++ b/meorg_client/cli.py @@ -3,7 +3,9 @@ import click from meorg_client.client import Client import meorg_client.utilities as mcu +import meorg_client.constants as mcc from meorg_client import __version__ +import json import os import sys import getpass @@ -20,17 +22,16 @@ def _get_client() -> Client: Client object. """ # Get the dev-mode flag from the environment, better than passing the dev flag everywhere. 
-    dev_mode = os.getenv("MEORG_DEV_MODE", "0") == "1"
 
     credentials = mcu.get_user_data_filepath("credentials.json")
     credentials_dev = mcu.get_user_data_filepath("credentials-dev.json")
 
     # In dev mode and the configuration file exists
-    if dev_mode and credentials_dev.is_file():
+    if mcu.is_dev_mode() and credentials_dev.is_file():
         credentials = mcu.load_user_data("credentials-dev.json")
 
     # In dev mode and it doesn't (i.e. Actions)
-    elif dev_mode and not credentials_dev.is_file():
+    elif mcu.is_dev_mode() and not credentials_dev.is_file():
         credentials = dict(
             email=os.getenv("MEORG_EMAIL"), password=os.getenv("MEORG_PASSWORD")
         )
@@ -41,7 +42,9 @@ def _get_client() -> Client:
 
     # Get the client
     return Client(
-        email=credentials["email"], password=credentials["password"], dev_mode=dev_mode
+        email=credentials["email"],
+        password=credentials["password"],
+        dev_mode=mcu.is_dev_mode(),
     )
 
 
@@ -68,7 +71,7 @@ def _call(func: callable, **kwargs) -> dict:
         click.echo(ex.msg, err=True)
 
         # Bubble up the exception
-        if os.getenv("MEORG_DEV_MODE") == "1":
+        if mcu.is_dev_mode():
             raise
 
         sys.exit(1)
@@ -214,6 +217,72 @@ def analysis_start(id: str):
         click.echo(analysis_id)
 
 
+@click.command("create")
+@click.argument("mod_prof_id")
+@click.argument("exp_id")
+@click.argument("name")
+def create_new_model_output(mod_prof_id: str, exp_id: str, name: str):
+    """
+    Create a new model output.
+
+
+    Parameters
+    ----------
+    mod_prof_id : str
+        Model profile ID.
+
+    exp_id : str
+        Experiment ID.
+
+    name : str
+        New model output name.
+
+    Prints the model output ID of the created object, and whether it already existed.
+    """
+    client = _get_client()
+
+    response = _call(
+        client.model_output_create, mod_prof_id=mod_prof_id, exp_id=exp_id, name=name
+    )
+
+    if client.success():
+        model_output_id = response.get("data").get("modeloutput")
+        existing = response.get("data").get("existing")
+        click.echo(f"Model Output ID: {model_output_id}")
+        if existing is not None:
+            click.echo("Warning: Overwriting existing model output ID")
+        return model_output_id
+
+
+@click.command("query")
+@click.argument("model_id")
+def model_output_query(model_id: str):
+    """
+    Get details for a specific model output entity.
+
+    Parameters
+    ----------
+    model_id : str
+        Model Output ID.
+
+    Prints the `id` and `name` of the model output, or its full JSON representation in developer mode.
+ """ + client = _get_client() + + response = _call(client.model_output_query, model_id=model_id) + + if client.success(): + + model_output_data = response.get("data").get("modeloutput") + model_output_id = model_output_data.get("id") + name = model_output_data.get("name") + if mcu.is_dev_mode(): + click.echo(f"Model Output: {json.dumps(model_output_data, indent=4)}") + else: + click.echo(f"Model Output ID: {model_output_id}") + click.echo(f"Model Output Name: {name}") + + @click.command("status") @click.argument("id") def analysis_status(id: str): @@ -291,6 +360,11 @@ def cli_analysis(): pass +@click.group("output", help="Model output commands.") +def cli_model_output(): + pass + + # Add file commands cli_file.add_command(file_list) cli_file.add_command(file_upload) @@ -304,11 +378,16 @@ def cli_analysis(): cli_analysis.add_command(analysis_start) cli_analysis.add_command(analysis_status) +# Add output command +cli_model_output.add_command(create_new_model_output) +cli_model_output.add_command(model_output_query) + # Add subparsers to the master cli.add_command(cli_endpoints) cli.add_command(cli_file) cli.add_command(cli_analysis) cli.add_command(initialise) +cli.add_command(cli_model_output) if __name__ == "__main__": diff --git a/meorg_client/client.py b/meorg_client/client.py index 0f492fb..3b868e3 100644 --- a/meorg_client/client.py +++ b/meorg_client/client.py @@ -4,7 +4,7 @@ import hashlib as hl import os from typing import Union -from urllib.parse import urljoin +from urllib.parse import urljoin, urlencode from meorg_client.exceptions import RequestException import meorg_client.constants as mcc import meorg_client.endpoints as endpoints @@ -38,9 +38,7 @@ def __init__(self, email: str = None, password: str = None, dev_mode: bool = Fal mt.init() # Dev mode can be set by the user or from the environment - dev_mode = dev_mode or os.getenv("MEORG_DEV_MODE", "0") == "1" - - if dev_mode: + if dev_mode or mu.is_dev_mode(): self.base_url = os.getenv("MEORG_BASE_URL_DEV", None) else: self.base_url = mcc.MEORG_BASE_URL_PROD @@ -56,6 +54,7 @@ def _make_request( self, method: str, endpoint: str, + url_path_fields: dict = {}, url_params: dict = {}, data: dict = {}, json: dict = {}, @@ -72,8 +71,10 @@ def _make_request( HTTP method. endpoint : str URL template for the API endpoint. + url_path_fields : dict, optional + Fields to interpolate into the URL template, by default {} url_params : dict, optional - Parameters to interpolate into the URL template, by default {} + Parameters to add at end of URL, by default {} data : dict, optional Data to send along with the request, by default {} json : dict, optional @@ -106,7 +107,7 @@ def _make_request( # Get the function and URL func = getattr(requests, method.lower()) - url = self._get_url(endpoint, **url_params) + url = self._get_url(endpoint, url_params, **url_path_fields) # Assemble the headers _headers = self._merge_headers(headers) @@ -129,22 +130,29 @@ def _make_request( # For flexibility return self.last_response - def _get_url(self, endpoint: str, **kwargs): + def _get_url(self, endpoint: str, url_params: dict = {}, **url_path_fields: dict): """Get the well-formed URL for the call. Parameters ---------- endpoint : str Endpoint to be appended to the base URL. - **kwargs : - Key/value pairs to interpolate into the URL template. + url_path_fields : dict, optional + Fields to interpolate into the URL template + url_params : dict, optional + Parameters to add at end of URL, by default {} Returns ------- str URL. 
""" - return urljoin(self.base_url + "/", endpoint).format(**kwargs) + # Add endpoint to base URL, interpolating url_path_fields + url_path = urljoin(self.base_url + "/", endpoint).format(**url_path_fields) + # Add URL parameters (if any) + if url_params: + url_path = f"{url_path}?{urlencode(url_params)}" + return url_path def _merge_headers(self, headers: dict = dict()): """Merge additional headers into the client headers (i.e. Auth) @@ -348,7 +356,7 @@ def _upload_file( method=mcc.HTTP_POST, endpoint=endpoints.FILE_UPLOAD, files=payload, - url_params=dict(id=id), + url_path_fields=dict(id=id), return_json=True, ) @@ -372,7 +380,9 @@ def list_files(self, id: str) -> Union[dict, requests.Response]: Response from ME.org. """ return self._make_request( - method=mcc.HTTP_GET, endpoint=endpoints.FILE_LIST, url_params=dict(id=id) + method=mcc.HTTP_GET, + endpoint=endpoints.FILE_LIST, + url_path_fields=dict(id=id), ) def delete_file_from_model_output(self, id: str, file_id: str): @@ -393,7 +403,7 @@ def delete_file_from_model_output(self, id: str, file_id: str): return self._make_request( method=mcc.HTTP_DELETE, endpoint=endpoints.FILE_DELETE, - url_params=dict(id=id, fileId=file_id), + url_path_fields=dict(id=id, fileId=file_id), ) def delete_all_files_from_model_output(self, id: str): @@ -439,7 +449,51 @@ def start_analysis(self, id: str) -> Union[dict, requests.Response]: return self._make_request( method=mcc.HTTP_PUT, endpoint=endpoints.ANALYSIS_START, - url_params=dict(id=id), + url_path_fields=dict(id=id), + ) + + def model_output_create( + self, mod_prof_id: str, exp_id: str, name: str + ) -> Union[dict, requests.Response]: + """ + Create a new model output entity + Parameters + ---------- + mod_prof_id : str + Model Profile ID + exp_id : str + Experiment ID + name : str + Name of Model Output + + Returns + ------- + Union[dict, requests.Response] + Response from ME.org. + """ + return self._make_request( + method=mcc.HTTP_POST, + endpoint=endpoints.MODEL_OUTPUT_CREATE, + data=dict(experiment=exp_id, model=mod_prof_id, name=name), + ) + + def model_output_query(self, model_id: str) -> Union[dict, requests.Response]: + """ + Get details for a specific new model output entity + Parameters + ---------- + model_id : str + Model Output ID + + Returns + ------- + Union[dict, requests.Response] + Response from ME.org. 
+ """ + return self._make_request( + method=mcc.HTTP_GET, + endpoint=endpoints.MODEL_OUTPUT_QUERY, + url_params=dict(id=model_id), ) def get_analysis_status(self, id: str) -> Union[dict, requests.Response]: @@ -458,7 +512,7 @@ def get_analysis_status(self, id: str) -> Union[dict, requests.Response]: return self._make_request( method=mcc.HTTP_GET, endpoint=endpoints.ANALYSIS_STATUS, - url_params=dict(id=id), + url_path_fields=dict(id=id), ) def list_endpoints(self) -> Union[dict, requests.Response]: diff --git a/meorg_client/constants.py b/meorg_client/constants.py index 8cd4d29..b999264 100644 --- a/meorg_client/constants.py +++ b/meorg_client/constants.py @@ -1,3 +1,5 @@ +import os + """Constants.""" # Valid HTTP methods diff --git a/meorg_client/data/openapi.json b/meorg_client/data/openapi.json index 2b8776f..c9721d3 100644 --- a/meorg_client/data/openapi.json +++ b/meorg_client/data/openapi.json @@ -2,7 +2,7 @@ "openapi": "3.1.0", "info": { "title": "Modelevaluation REST endpoints", - "version": "2.0.0" + "version": "2.1.0" }, "servers": [ { @@ -41,15 +41,13 @@ "description": "Account password" } } - }, - "encoding": { - "explode": false } } } }, "responses": { "200": { + "description": "Generates API-key pair required for authentication", "content": { "application/json": { "schema": { @@ -97,6 +95,427 @@ } } }, + "/modeloutput": { + "get": { + "summary": "Retieve JSON representation from id or unique name", + "description": "Query by MO by id or by unique MO name", + "tags": [ + "Model Outputs" + ], + "security": [ + { + "userId": [], + "authToken": [] + } + ], + "parameters": [ + { + "in": "query", + "name": "id", + "description": "Model output id", + "schema": { + "type": "string" + } + }, + { + "in": "query", + "name": "name", + "description": "Name of model output", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Object may contain the following fields - \"id\", \"created\", \"modified\", \"state_selection\", \"parameter_selection\", \"comments\", \"is_bundle\", \"benchmarks\"", + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "status": { + "type": "string", + "example": "success" + }, + "data": { + "type": "object", + "properties": { + "modeloutput": { + "type": "object", + "description": "JSON representation of the modeloutput", + "example": { + "id": "me5pq77uhy4bjR78", + "name": "abc", + "benchmarks": [ + "1", + "2", + "3" + ] + } + } + } + } + } + } + } + } + }, + "401": { + "$ref": "#/components/unauthenticated" + }, + "403": { + "$ref": "#/components/unauthorisedResponse" + }, + "404": { + "$ref": "#/components/notFound" + } + } + }, + "post": { + "summary": "Create a new MO entity", + "description": "If model output name is existing, and belongs to the current user, the existing MO's id is returned", + "tags": [ + "Model Outputs" + ], + "security": [ + { + "userId": [], + "authToken": [] + } + ], + "requestBody": { + "description": "Experiment and Model IDs must be existing. Name length restricted to 50 characters", + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "required": [ + "experiment", + "model", + "name" + ], + "properties": { + "experiment": { + "type": "string", + "description": "The ID of the experiment" + }, + "model": { + "type": "string", + "description": "The ID of the model profile. Note!!! Not to be confused with a model output." + }, + "name": { + "type": "string", + "description": "The name of the model output. 
Length of 50 max." + }, + "state_selection": { + "type": "string", + "description": "One of \"default model initialisation\", \"model spinup on forcing data\", \"states derived directly from measurements\", or \"other\"", + "nullable": true + }, + "parameter_selection": { + "type": "string", + "description": "One of \"automated calibration\", \"manual calibration\", or \"no calibration (model default values)\"", + "nullable": true + }, + "comments": { + "type": "string", + "description": "Additional descriptions about the model output - e.g. configurations", + "nullable": true + }, + "is_bundle": { + "type": "boolean", + "description": "Indicates if the model output is a bundle. If so, this model output will not be a benchmark option for other analyses", + "nullable": true + }, + "benchmarks": { + "type": "array", + "description": "List of benchmarks associated with the model output. Benchmarks must be accessible from the associated experiment", + "items": { + "type": "string" + }, + "nullable": true + } + } + } + } + } + }, + "responses": { + "200": { + "description": "Contains the existing MO Id in data field. It will specify whether an existing record was returned or a new record created", + "content": { + "application/json": { + "schema": { + "properties": { + "status": { + "type": "string", + "example": "success" + }, + "data": { + "type": "object", + "properties": { + "modeloutput": { + "type": "string", + "description": "modeloutput ID of created object", + "example": "me5pq77uhy4bjR78R" + }, + "created": { + "type": "boolean", + "description": "value indicates whether resource was existing or new", + "example": false + } + } + } + } + } + } + } + }, + "400": { + "$ref": "#/components/invalidData" + }, + "401": { + "$ref": "#/components/unauthenticated" + }, + "403": { + "$ref": "#/components/unauthorisedResponse" + } + } + } + }, + "/modeloutput/{id}": { + "patch": { + "summary": "Update specific fields of an existing MO", + "description": "See schema below for detail on fields that may be edited. Note experiments cannot be edited via API.", + "tags": [ + "Model Outputs" + ], + "security": [ + { + "userId": [], + "authToken": [] + } + ], + "parameters": [ + { + "in": "path", + "name": "id", + "required": true, + "description": "Model output id", + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "description": "Fields that are not included will remain unchanged", + "properties": { + "name": { + "type": "string", + "description": "The name of the model output. Length of 50 max.", + "nullable": true + }, + "model": { + "type": "string", + "description": "The ID of the model profile. Note, not to be confused with a model output", + "nullable": true + }, + "state_selection": { + "type": "string", + "description": "One of \"default model initialisation\", \"model spinup on forcing data\", \"states derived directly from measurements\", or \"other\"", + "nullable": true + }, + "parameter_selection": { + "type": "string", + "description": "One of \"automated calibration\", \"manual calibration\", or \"no calibration (model default values)\"", + "nullable": true + }, + "comments": { + "type": "string", + "description": "Additional descriptions about the model output - e.g. configurations", + "nullable": true + }, + "is_bundle": { + "type": "boolean", + "description": "Indicates if the model output is a bundle. 
If so, this model output will not be a benchmark option for other analyses", + "nullable": true + }, + "benchmarks": { + "type": "array", + "description": "List of benchmarks associated with the model output. Benchmarks must be accessible from the associated experiment", + "items": { + "type": "string" + }, + "nullable": true + } + } + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "status": { + "type": "string", + "example": "success" + }, + "data": { + "type": "string", + "nullable": true, + "example": null + } + } + } + } + } + }, + "400": { + "$ref": "#/components/invalidData" + }, + "401": { + "$ref": "#/components/unauthenticated" + }, + "403": { + "$ref": "#/components/unauthorisedResponse" + }, + "404": { + "$ref": "#/components/notFound" + } + } + }, + "delete": { + "summary": "Remove a MO", + "description": "All unique data associated will be deleted - e.g. output files", + "tags": [ + "Model Outputs" + ], + "security": [ + { + "userId": [], + "authToken": [] + } + ], + "parameters": [ + { + "in": "path", + "name": "id", + "required": true, + "description": "Model output id", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "status": { + "type": "string", + "example": "success" + }, + "data": { + "type": "string", + "nullable": true, + "example": null + } + } + } + } + } + }, + "401": { + "$ref": "#/components/unauthenticated" + }, + "403": { + "$ref": "#/components/unauthorisedResponse" + }, + "404": { + "$ref": "#/components/notFound" + } + } + } + }, + "/modeloutput/{id}/available-benchmarks": { + "get": { + "summary": "Retrieve related MOs which may be used as a benchmark", + "description": "Only model outputs that are used in the same experiment can be designated as a benchmark", + "tags": [ + "Model Outputs" + ], + "security": [ + { + "userId": [], + "authToken": [] + } + ], + "parameters": [ + { + "in": "path", + "name": "id", + "required": true, + "description": "Model output id", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returns a list of MO ids that may be used as benchmarks", + "content": { + "application/json": { + "schema": { + "properties": { + "status": { + "type": "string", + "example": "success" + }, + "data": { + "type": "object", + "properties": { + "benchmarks": { + "type": "Array", + "items": { + "type": "string", + "description": "modeloutput ID of available benchmark", + "example": "me5pq77uhy4bjR78R" + } + } + } + } + } + } + } + } + }, + "400": { + "$ref": "#/components/invalidData" + }, + "401": { + "$ref": "#/components/unauthenticated" + }, + "403": { + "$ref": "#/components/unauthorisedResponse" + } + } + } + }, "/modeloutput/{id}/start.json": { "get": { "description": "Get the JSON in text form that would be sent to the worker to start analysis.", diff --git a/meorg_client/endpoints.py b/meorg_client/endpoints.py index f61ef30..3ac8446 100644 --- a/meorg_client/endpoints.py +++ b/meorg_client/endpoints.py @@ -16,3 +16,7 @@ # Analysis ANALYSIS_START = "modeloutput/{id}/start" ANALYSIS_STATUS = "analysis/{id}/status" + +# Model Outputs +MODEL_OUTPUT_CREATE = "modeloutput" +MODEL_OUTPUT_QUERY = MODEL_OUTPUT_CREATE diff --git a/meorg_client/tests/conftest.py b/meorg_client/tests/conftest.py index ed859eb..9117906 100644 --- a/meorg_client/tests/conftest.py +++ b/meorg_client/tests/conftest.py @@ -1,4 
+1,5 @@ import os +import pytest # Set dev mode os.environ["MEORG_DEV_MODE"] = "1" @@ -27,3 +28,42 @@ def __repr__(self): store.set("email", os.environ.get("MEORG_EMAIL")) store.set("password", os.environ.get("MEORG_PASSWORD")) store.set("model_output_id", os.environ.get("MEORG_MODEL_OUTPUT_ID")) +store.set("experiment_id", os.environ.get("MEORG_EXPERIMENT_ID")) +store.set("model_profile_id", os.environ.get("MEORG_MODEL_PROFILE_ID")) +store.set("model_output_name", os.environ.get("MEORG_MODEL_OUTPUT_NAME")) + + +@pytest.fixture +def model_profile_id() -> str: + """Get the experiment ID out of the environment. + + Returns + ------- + str + Model Profile ID. + """ + return os.getenv("MEORG_MODEL_PROFILE_ID") + + +@pytest.fixture +def experiment_id() -> str: + """Get the experiment ID out of the environment. + + Returns + ------- + str + Experiment ID. + """ + return os.getenv("MEORG_EXPERIMENT_ID") + + +@pytest.fixture +def model_output_name() -> str: + """Get the model output name. + + Returns + ------- + str + Model output name. + """ + return os.getenv("MEORG_MODEL_OUTPUT_NAME") or "meorg-client-model-output" diff --git a/meorg_client/tests/test_cli.py b/meorg_client/tests/test_cli.py index 0b4a633..0023646 100644 --- a/meorg_client/tests/test_cli.py +++ b/meorg_client/tests/test_cli.py @@ -56,6 +56,32 @@ def test_list_endpoints(runner: CliRunner): assert result.exit_code == 0 +def test_create_model_output( + runner: CliRunner, model_profile_id, experiment_id, model_output_name +): + """Test Creation of Model output.""" + result = runner.invoke( + cli.create_new_model_output, + [model_profile_id, experiment_id, model_output_name], + standalone_mode=False, + ) + + assert result.exit_code == 0 + assert type(result.return_value) is str # The new model output + + # Test newly created model_output_id + test_model_output_query(runner, result.return_value) + + +def test_model_output_query(runner: CliRunner, model_output_id: str): + """Test Existing Model output.""" + result = runner.invoke( + cli.model_output_query, + [model_output_id], + ) + assert result.exit_code == 0 + + def test_file_upload(runner: CliRunner, test_filepath: str, model_output_id: str): """Test file-upload via CLI. diff --git a/meorg_client/tests/test_client.py b/meorg_client/tests/test_client.py index 123b88e..13b92bc 100644 --- a/meorg_client/tests/test_client.py +++ b/meorg_client/tests/test_client.py @@ -93,6 +93,30 @@ def test_list_endpoints(client: Client): assert isinstance(response, dict) +def test_create_model_output( + client: Client, model_profile_id: str, experiment_id: str, model_output_name: str +): + """Test Creation of Model output.""" + response = client.model_output_create( + model_profile_id, experiment_id, model_output_name + ) + assert client.success() + + model_output_id = response.get("data").get("modeloutput") + assert model_output_id is not None + + test_model_output_query(client, model_output_id) + + +def test_model_output_query(client: Client, model_output_id: str): + """Test Existing Model output.""" + response = client.model_output_query(model_output_id) + assert client.success() + + response_model_output_data = response.get("data").get("modeloutput") + assert response_model_output_data.get("id") == model_output_id + + def test_upload_file(client: Client, test_filepath: str, model_output_id: str): """Test the uploading of a file. 
diff --git a/meorg_client/utilities.py b/meorg_client/utilities.py
index 302803d..8625a22 100644
--- a/meorg_client/utilities.py
+++ b/meorg_client/utilities.py
@@ -101,3 +101,8 @@ def get_uploaded_file_ids(response):
     """
     file_ids = [f.get("file") for f in response.get("data").get("files")]
     return file_ids
+
+
+def is_dev_mode() -> bool:
+    """Check if developer mode is enabled via the MEORG_DEV_MODE environment variable."""
+    return os.getenv("MEORG_DEV_MODE", "0") == "1"
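Usage sketch (illustrative only, not part of the patch): the new `Client.model_output_create` and `Client.model_output_query` methods introduced above are intended to be used together as below. Credentials come from the same `MEORG_EMAIL`/`MEORG_PASSWORD` environment variables used in CI, the model profile and experiment IDs are placeholders, and the response shapes follow the `/modeloutput` POST and GET definitions in the updated `openapi.json`.

```python
# Illustrative sketch only. Assumes MEORG_EMAIL and MEORG_PASSWORD are set,
# MEORG_BASE_URL_DEV is configured for dev mode, and the profile/experiment
# IDs below are replaced with real ones from modelevaluation.org.
import os

from meorg_client.client import Client

client = Client(
    email=os.environ["MEORG_EMAIL"],
    password=os.environ["MEORG_PASSWORD"],
    dev_mode=True,  # targets MEORG_BASE_URL_DEV, as in the test suite
)

# Create a model output under an existing model profile and experiment.
# If a model output with this name already exists for the user, the API
# returns the existing record's ID rather than creating a new one.
response = client.model_output_create(
    mod_prof_id="<MODEL_PROFILE_ID>",  # placeholder
    exp_id="<EXPERIMENT_ID>",          # placeholder
    name="meorg-client-example-output",
)
assert client.success()
model_output_id = response.get("data").get("modeloutput")

# Query the model output back; "modeloutput" holds its JSON representation.
details = client.model_output_query(model_id=model_output_id)
assert client.success()
print(details.get("data").get("modeloutput").get("name"))
```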