From 532ae0c66ab91cbdc2233dcdf37a3fd7a08927c2 Mon Sep 17 00:00:00 2001 From: Abhaas Goyal Date: Wed, 21 May 2025 06:47:45 +1000 Subject: [PATCH] (#63) Implement me.org v3.0.0 API endpoints --- docs/cli.md | 38 ++++ meorg_client/cli.py | 348 ++++++++++++++++++++++++++++-- meorg_client/client.py | 127 ++++++++++- meorg_client/data/openapi.json | 319 +++++++++++++++++++++++---- meorg_client/endpoints.py | 8 +- meorg_client/tests/conftest.py | 39 ++-- meorg_client/tests/test_cli.py | 257 +++++++++++++++++++--- meorg_client/tests/test_client.py | 308 +++++++++++++++++++------- 8 files changed, 1250 insertions(+), 194 deletions(-) diff --git a/docs/cli.md b/docs/cli.md index db6dd21..393f6c9 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -126,6 +126,44 @@ This command will print the `id` and `name` of the modeloutput. If developer mod } ``` +### model output update + +Update specific fields for an existing model output ID + +```shell +meorg output update [OPTIONS] $MODEL_OUTPUT_ID +``` + +Some of the available options as flags are: + +```shell + --name + --model-profile-id + --state-selection + --parameter-selection + --comments + --is-bundle + --benchmarks +``` + + +This command will print the `id` for the updated copy of modeloutput. If developer mode is enabled, print the JSON representation for the data section of the response. An example model output data response would be: + +```json +{ + "id": "MnCj3tMzGx3NsuzwS", + "created": false, +} +``` + +### model output delete + +Remove a model output entity + +```shell +meorg output delete $MODEL_OUTPUT_ID +``` + ### analysis status To query the status of an analysis, execute the following command: diff --git a/meorg_client/cli.py b/meorg_client/cli.py index 0576fbf..86b9d0e 100644 --- a/meorg_client/cli.py +++ b/meorg_client/cli.py @@ -201,31 +201,89 @@ def file_delete(output_id: str, file_id: str): @click.command("start") -@click.argument("id") -def analysis_start(id: str): +@click.argument("model_output_id") +@click.argument("experiment_id") +def analysis_start(model_output_id: str, experiment_id: str): """ - Start the analysis for the model output id. + Start the analysis for the model output id, and an associated experiment ID. Prints the Analysis ID, which can be used in analysis-status. 
""" client = _get_client() - response = _call(client.start_analysis, id=id) + response = _call( + client.start_analysis, + model_output_id=model_output_id, + experiment_id=experiment_id, + ) if client.success(): analysis_id = response.get("data").get("analysisId") click.echo(analysis_id) +def _generate_model_output_config( + state_selection: str, parameter_selection: str, comments: str, is_bundle: bool +): + state_sel_dict = { + "default": "default model initialisation", + "spinup": "model spinup on forcing data", + "measurements": "states derived directly from measurements", + "other": "other", + } + + param_sel_dict = { + "automated": "automated calibration", + "manual": "manual calibration", + "none": "no calibration (model default values)", + } + + config_params = { + "state_selection": state_sel_dict.get(state_selection), + "parameter_selection": param_sel_dict.get(parameter_selection), + "comments": comments, + "is_bundle": is_bundle, + } + config_params = {k: v for k, v in config_params.items() if v} + + return config_params + + @click.command("create") @click.argument("mod_prof_id") -@click.argument("exp_id") @click.argument("name") -def create_new_model_output(mod_prof_id: str, exp_id: str, name: str): +@click.option( + "--state-selection", + type=click.Choice(["default", "spinup", "measurements", "other"]), + help="", +) +@click.option( + "--parameter-selection", + type=click.Choice(["automated", "manual", "none"]), + help="", +) +@click.option( + "--comments", + type=str, + help="", +) +@click.option( + "--is-bundle", + is_flag=True, + default=False, + help="", +) +def create_new_model_output( + mod_prof_id: str, + name: str, + state_selection: str, + parameter_selection: str, + comments: str, + is_bundle: bool, +): """ Create a new model output profile. - Parameters ---------- mod_prof_id : str @@ -237,12 +295,24 @@ def create_new_model_output(mod_prof_id: str, exp_id: str, name: str): name : str New model output name + state_selection : str + Maps to one of "default model initialisation", "model spinup on forcing data", + "states derived directly from measurements", or "other" + parameter_selection : str + Maps to one of "automated calibration", "manual calibration", or + "no calibration (model default values)" + comments : str + Additional Info on Model output + is_bundle : bool + Indicates if the model output is a bundle Prints modeloutput ID of created object, and whether it already existed or not. """ client = _get_client() - + config_params = _generate_model_output_config( + state_selection, parameter_selection, comments, is_bundle + ) response = _call( - client.model_output_create, mod_prof_id=mod_prof_id, exp_id=exp_id, name=name + client.model_output_create, mod_prof_id=mod_prof_id, name=name, **config_params ) if client.success(): @@ -265,7 +335,8 @@ def model_output_query(model_id: str): model_id : str Model Output ID. - Prints the `id` and `name` of the modeloutput, and JSON representation for the remaining metadata. + + Prints the `id` modeloutput, and JSON representation for the remaining metadata if in dev mode. 
""" client = _get_client() @@ -273,14 +344,237 @@ def model_output_query(model_id: str): if client.success(): - model_output_data = response.get("data").get("modeloutput") - model_output_id = model_output_data.get("id") - name = model_output_data.get("name") + model_output_id = response.get("data").get("modeloutput").get("id") + click.echo(model_output_id) + + +def _parse_csv(ctx, param, value): + if not value: + return [] + + return value.split(",") + + +@click.command("update") +@click.argument("model_output_id") +@click.option( + "--name", + type=str, + help="", +) +@click.option( + "--model-profile-id", + type=str, + help="", +) +@click.option( + "--state-selection", + type=click.Choice(["default", "spinup", "measurements", "other"]), + help="", +) +@click.option( + "--parameter-selection", + type=click.Choice(["automated", "manual", "none"]), + help="", +) +@click.option( + "--comments", + type=str, + help="", +) +@click.option( + "--is-bundle", + is_flag=True, + default=False, + help="", +) +def model_output_update( + model_output_id: str, + name: str, + model_profile_id: str, + state_selection: str, + parameter_selection: str, + comments: str, + is_bundle: bool, +): + """ + + Update specific fields of an existing model output. + + Parameters + ---------- + model_output_id : str + Model Output ID + name : str + Model Output Name + model_profile_id : str + Model Profile ID + state_selection : str + Maps to one of "default model initialisation", "model spinup on forcing data", + "states derived directly from measurements", or "other" + parameter_selection : str + Maps to one of "automated calibration", "manual calibration", or + "no calibration (model default values)" + comments : str + Additional Info on Model output + is_bundle : bool + Indicates if the model output is a bundle + """ + client = _get_client() + + updated_fields = { + "name": name, + "model": model_profile_id, + } | _generate_model_output_config( + state_selection, parameter_selection, comments, is_bundle + ) + + # Remove unpassed params to CLI + updated_fields = {k: v for k, v in updated_fields.items() if v} + + _ = _call( + client.model_output_update, + model_id=model_output_id, + updated_fields=updated_fields, + ) + + if client.success(): + click.echo("Parameters of MO updated") + + +@click.command("list") +@click.argument("model_output_id") +@click.argument("exp_id") +def model_output_benchmarks_list(model_output_id: str, exp_id: str): + """List model benchmarks. + + Parameters + ---------- + model_output_id : str + Model output ID + exp_id : str + Experiment ID + """ + client = _get_client() + response = _call( + client.model_output_benchmarks_list, + model_id=model_output_id, + exp_id=exp_id, + ) + + if client.success(): + click.echo( + f"List of available benchmarks: {json.dumps(response.get('data').get('benchmarks'), indent=4)}" + ) + click.echo( + f"List of linked benchmarks: {json.dumps(response.get('data').get('current'), indent=4)}" + ) + + return response.get("data") + + +@click.command("update") +@click.argument("model_output_id") +@click.argument("exp_id") +@click.argument("benchmark_ids", default="", callback=_parse_csv) +def model_output_benchmarks_replace( + model_output_id: str, exp_id: str, benchmark_ids: str +): + """ + Change benchmarks associated with Model output and Experiment. 
+ + Parameters + ---------- + model_output_id : str + Model output ID + exp_id : str + Experiment ID + benchmarks : list[str] + List of benchmarks IDs to fully replace existing + """ + client = _get_client() + _ = _call( + client.model_output_benchmarks_replace, + model_id=model_output_id, + exp_id=exp_id, + updated_benchmarks=benchmark_ids, + ) + + if client.success(): + click.echo("Benchmark updated") + + +@click.command("update") +@click.argument("model_output_id") +@click.argument("exp_ids", default="", callback=_parse_csv) +def model_output_experiments_extend(model_output_id: str, exp_ids: list[str]): + """ + Extend existing set of experiment associations. + + Parameters + ---------- + model_output_id : str + Model output ID + + experiments : list[str] + List of experiment IDs + """ + client = _get_client() + _ = _call( + client.model_output_experiments_extend, + model_id=model_output_id, + updated_experiments=exp_ids, + ) + + if client.success(): + click.echo("Experiments updated") + + +@click.command("delete") +@click.argument("model_output_id") +@click.argument("exp_id") +def model_output_experiment_delete(model_output_id: str, exp_id: str): + """Delete specific experiment associated with model output + + Parameters + ---------- + model_output_id : str + Model output ID + + experiment : str + Experiment IDs + """ + client = _get_client() + _ = _call( + client.model_output_experiment_delete, model_id=model_output_id, exp_id=exp_id + ) + + if client.success(): + click.echo(f"Experiment ID: {exp_id} deleted") + + +@click.command("delete") +@click.argument("model_id") +def model_output_delete(model_id: str): + """ + Remove model output entity + + Parameters + ---------- + model_id : str + Model Output ID. + + Prints the status of the operation. 
+ """ + client = _get_client() + + response = _call(client.model_output_delete, model_id=model_id) + + if client.success(): if mcu.is_dev_mode(): - click.echo(f"Model Output: {json.dumps(model_output_data, indent=4)}") + click.echo(f"Delete: {json.dumps(response, indent=4)}") else: - click.echo(f"Model Output ID: {model_output_id}") - click.echo(f"Model Output Name: {name}") + click.echo(f"Operation status: {response.get('status')}") @click.command("status") @@ -365,6 +659,16 @@ def cli_model_output(): pass +@click.group("benchmark", help="Model output benchmark commands.") +def cli_model_benchmark(): + pass + + +@click.group("experiment", help="Model output experiment commands.") +def cli_model_experiments(): + pass + + # Add file commands cli_file.add_command(file_list) cli_file.add_command(file_upload) @@ -381,6 +685,16 @@ def cli_model_output(): # Add output command cli_model_output.add_command(create_new_model_output) cli_model_output.add_command(model_output_query) +cli_model_output.add_command(model_output_update) +cli_model_output.add_command(model_output_delete) + +# Benchmarks command +cli_model_benchmark.add_command(model_output_benchmarks_list) +cli_model_benchmark.add_command(model_output_benchmarks_replace) + +# Experiments command +cli_model_experiments.add_command(model_output_experiments_extend) +cli_model_experiments.add_command(model_output_experiment_delete) # Add subparsers to the master cli.add_command(cli_endpoints) @@ -388,6 +702,8 @@ def cli_model_output(): cli.add_command(cli_analysis) cli.add_command(initialise) cli.add_command(cli_model_output) +cli.add_command(cli_model_benchmark) +cli.add_command(cli_model_experiments) if __name__ == "__main__": diff --git a/meorg_client/client.py b/meorg_client/client.py index 3b868e3..41d3a75 100644 --- a/meorg_client/client.py +++ b/meorg_client/client.py @@ -433,7 +433,9 @@ def delete_all_files_from_model_output(self, id: str): return responses - def start_analysis(self, id: str) -> Union[dict, requests.Response]: + def start_analysis( + self, model_output_id: str, experiment_id: str + ) -> Union[dict, requests.Response]: """Start the analysis chain. Parameters @@ -449,11 +451,11 @@ def start_analysis(self, id: str) -> Union[dict, requests.Response]: return self._make_request( method=mcc.HTTP_PUT, endpoint=endpoints.ANALYSIS_START, - url_path_fields=dict(id=id), + url_path_fields=dict(id=model_output_id, expid=experiment_id), ) def model_output_create( - self, mod_prof_id: str, exp_id: str, name: str + self, mod_prof_id: str, name: str, **config_params ) -> Union[dict, requests.Response]: """ Create a new model output entity @@ -474,7 +476,7 @@ def model_output_create( return self._make_request( method=mcc.HTTP_POST, endpoint=endpoints.MODEL_OUTPUT_CREATE, - data=dict(experiment=exp_id, model=mod_prof_id, name=name), + data=dict(model=mod_prof_id, name=name) | config_params, ) def model_output_query(self, model_id: str) -> Union[dict, requests.Response]: @@ -496,6 +498,123 @@ def model_output_query(self, model_id: str) -> Union[dict, requests.Response]: url_params=dict(id=model_id), ) + def model_output_update( + self, model_id: str, updated_fields: dict + ) -> Union[dict, requests.Response]: + """ + Update specific fields of an existing model output. + Parameters + ---------- + model_id : str + Model Output ID + + params : dict + Request body containing necessary fields to be updated + + Returns + ------- + Union[dict, requests.Response] + Response from ME.org. 
+ """ + return self._make_request( + method=mcc.HTTP_PATCH, + endpoint=endpoints.MODEL_OUTPUT_UPDATE, + url_path_fields=dict(id=model_id), + json=updated_fields, + ) + + def model_output_benchmarks_list( + self, model_id: str, exp_id: str + ) -> Union[dict, requests.Response]: + return self._make_request( + method=mcc.HTTP_GET, + endpoint=endpoints.MODEL_OUTPUT_BENCHMARKS, + url_path_fields=dict(id=model_id, expId=exp_id), + ) + + def model_output_benchmarks_replace( + self, model_id: str, exp_id: str, updated_benchmarks: list[str] + ) -> Union[dict, requests.Response]: + """ + Replace benchmarks. + Parameters + ---------- + model_id : str + Model Output ID + + exp_id: str + Experiment ID + + updated_benchmarks: + + Returns + ------- + Union[dict, requests.Response] + Response from ME.org. + """ + return self._make_request( + method=mcc.HTTP_PATCH, + endpoint=endpoints.MODEL_OUTPUT_BENCHMARKS, + url_path_fields=dict(id=model_id, expId=exp_id), + json=dict(benchmarks=updated_benchmarks), + ) + + def model_output_experiments_extend( + self, model_id: str, updated_experiments: list[str] + ) -> Union[dict, requests.Response]: + """ + Add experiments. + Parameters + ---------- + model_id : str + Model Output ID + + exp_id: str + Experiment ID + + updated_benchmarks: + + Returns + ------- + Union[dict, requests.Response] + Response from ME.org. + """ + return self._make_request( + method=mcc.HTTP_PATCH, + endpoint=endpoints.MODEL_OUTPUT_EXPERIMENTS, + url_path_fields=dict(id=model_id), + json=dict(experiments=updated_experiments), + ) + + def model_output_experiment_delete( + self, model_id: str, exp_id: str + ) -> Union[dict, requests.Response]: + return self._make_request( + method=mcc.HTTP_DELETE, + endpoint=endpoints.MODEL_OUTPUT_EXPERIMENTS, + url_path_fields=dict(id=model_id), + json=dict(experiment=exp_id), + ) + + def model_output_delete(self, model_id: str) -> Union[dict, requests.Response]: + """ + Remove specific new model output entity + Parameters + ---------- + model_id : str + Model Output ID + + Returns + ------- + Union[dict, requests.Response] + Response from ME.org. + """ + return self._make_request( + method=mcc.HTTP_DELETE, + endpoint=endpoints.MODEL_OUTPUT_DELETE, + url_path_fields=dict(id=model_id), + ) + def get_analysis_status(self, id: str) -> Union[dict, requests.Response]: """Check the status of the analysis chain. 
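Reviewer note (illustrative, not part of the patch): a minimal sketch of how the new client methods above fit together under the v3.0.0 flow, where experiments are linked after creation and `start_analysis` takes both IDs. Method names and response keys (`data.modeloutput`, `data.benchmarks`, `data.analysisId`) come from this patch; the `Client` is assumed to be already authenticated (as in the test helper `_get_authenticated_client`), and all ID values and the function name are placeholders.

```python
from meorg_client.client import Client


def run_v3_workflow(client: Client, model_profile_id: str, experiment_id: str) -> str:
    """Sketch of the create -> link experiment -> benchmark -> analyse flow."""
    # Create the model output against a model profile only (no experiment at creation time).
    response = client.model_output_create(
        model_profile_id,
        "my-model-output",
        state_selection="default model initialisation",
        parameter_selection="automated calibration",
    )
    model_output_id = response.get("data").get("modeloutput")

    # Experiments are now linked after creation via the experiments endpoint.
    client.model_output_experiments_extend(model_output_id, [experiment_id])

    # Optionally replace the benchmark set with one of the available benchmarks.
    listing = client.model_output_benchmarks_list(model_output_id, experiment_id)
    available = listing.get("data").get("benchmarks")
    if available:
        client.model_output_benchmarks_replace(
            model_output_id, experiment_id, [available[0]["id"]]
        )

    # Starting an analysis now requires the experiment ID as well.
    response = client.start_analysis(model_output_id, experiment_id)
    return response.get("data").get("analysisId")
```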
diff --git a/meorg_client/data/openapi.json b/meorg_client/data/openapi.json index c9721d3..4ddb2fc 100644 --- a/meorg_client/data/openapi.json +++ b/meorg_client/data/openapi.json @@ -2,7 +2,7 @@ "openapi": "3.1.0", "info": { "title": "Modelevaluation REST endpoints", - "version": "2.1.0" + "version": "3.0.1" }, "servers": [ { @@ -97,7 +97,7 @@ }, "/modeloutput": { "get": { - "summary": "Retieve JSON representation from id or unique name", + "summary": "Retrieve JSON representation using id or unique name", "description": "Query by MO by id or by unique MO name", "tags": [ "Model Outputs" @@ -128,7 +128,7 @@ ], "responses": { "200": { - "description": "Object may contain the following fields - \"id\", \"created\", \"modified\", \"state_selection\", \"parameter_selection\", \"comments\", \"is_bundle\", \"benchmarks\"", + "description": "Object may contain the following fields - \"id\", \"created\", \"modified\", \"state_selection\", \"parameter_selection\", \"comments\", \"is_bundle\"", "content": { "application/json": { "schema": { @@ -147,11 +147,8 @@ "example": { "id": "me5pq77uhy4bjR78", "name": "abc", - "benchmarks": [ - "1", - "2", - "3" - ] + "created": "2025-02-02T13:44:28.203Z", + "state_selection": "other" } } } @@ -174,7 +171,7 @@ }, "post": { "summary": "Create a new MO entity", - "description": "If model output name is existing, and belongs to the current user, the existing MO's id is returned", + "description": "If model output name is existing, and belongs to the current user, treat as an update request.", "tags": [ "Model Outputs" ], @@ -185,22 +182,17 @@ } ], "requestBody": { - "description": "Experiment and Model IDs must be existing. Name length restricted to 50 characters", + "description": "Model IDs must be existing. Name length restricted to 50 characters", "required": true, "content": { "application/json": { "schema": { "type": "object", "required": [ - "experiment", "model", "name" ], "properties": { - "experiment": { - "type": "string", - "description": "The ID of the experiment" - }, "model": { "type": "string", "description": "The ID of the model profile. Note!!! Not to be confused with a model output." @@ -228,14 +220,6 @@ "type": "boolean", "description": "Indicates if the model output is a bundle. If so, this model output will not be a benchmark option for other analyses", "nullable": true - }, - "benchmarks": { - "type": "array", - "description": "List of benchmarks associated with the model output. Benchmarks must be accessible from the associated experiment", - "items": { - "type": "string" - }, - "nullable": true } } } @@ -244,7 +228,7 @@ }, "responses": { "200": { - "description": "Contains the existing MO Id in data field. It will specify whether an existing record was returned or a new record created", + "description": "Contains the existing MO Id in data field. 
It will specify whether an existing record was found and updated or a new record created", "content": { "application/json": { "schema": { @@ -258,12 +242,12 @@ "properties": { "modeloutput": { "type": "string", - "description": "modeloutput ID of created object", + "description": "Modeloutput ID of created object", "example": "me5pq77uhy4bjR78R" }, - "created": { + "existing": { "type": "boolean", - "description": "value indicates whether resource was existing or new", + "description": "Value indicates whether resource was existing or new", "example": false } } @@ -288,7 +272,7 @@ "/modeloutput/{id}": { "patch": { "summary": "Update specific fields of an existing MO", - "description": "See schema below for detail on fields that may be edited. Note experiments cannot be edited via API.", + "description": "See schema below for detail on fields that may be edited. Note experiments cannot be edited via API. Request body cannot be empty", "tags": [ "Model Outputs" ], @@ -346,14 +330,6 @@ "type": "boolean", "description": "Indicates if the model output is a bundle. If so, this model output will not be a benchmark option for other analyses", "nullable": true - }, - "benchmarks": { - "type": "array", - "description": "List of benchmarks associated with the model output. Benchmarks must be accessible from the associated experiment", - "items": { - "type": "string" - }, - "nullable": true } } } @@ -451,9 +427,9 @@ } } }, - "/modeloutput/{id}/available-benchmarks": { + "/modeloutput/{id}/{expId}/available-benchmarks": { "get": { - "summary": "Retrieve related MOs which may be used as a benchmark", + "summary": "Retrieve related MOs which can be used as a benchmark. Additionally, retrieves benchmarks currently used with MO.", "description": "Only model outputs that are used in the same experiment can be designated as a benchmark", "tags": [ "Model Outputs" @@ -473,6 +449,15 @@ "schema": { "type": "string" } + }, + { + "in": "path", + "name": "expId", + "required": true, + "description": "Experiment id", + "schema": { + "type": "string" + } } ], "responses": { @@ -490,10 +475,28 @@ "type": "object", "properties": { "benchmarks": { + "type": "Array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "modeloutput ID of potential benchmark", + "example": "me5pq77uhy4bjR78R" + }, + "name": { + "type": "string", + "description": "human readable name of the MO", + "example": "myModelOutput-2025" + } + } + } + }, + "current": { "type": "Array", "items": { "type": "string", - "description": "modeloutput ID of available benchmark", + "description": "modeloutput ID of an active benchmark", "example": "me5pq77uhy4bjR78R" } } @@ -514,9 +517,232 @@ "$ref": "#/components/unauthorisedResponse" } } + }, + "patch": { + "summary": "Change benchmarks associated with MO-EXP", + "description": "Replace benchmarks entirely with new list. 
Up to a max of 3 benchmarks", + "tags": [ + "Model Outputs" + ], + "security": [ + { + "userId": [], + "authToken": [] + } + ], + "parameters": [ + { + "in": "path", + "name": "id", + "required": true, + "description": "Model output id", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "expId", + "required": true, + "description": "Experiment id", + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "description": "Only valid benchmarks can be linked", + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "required": [ + "benchmarks" + ], + "properties": { + "benchmarks": { + "type": "Array", + "description": "List of benchmarks to fully replace existing", + "items": { + "type": "string" + } + } + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfuly updated benchmarks", + "content": { + "application/json": { + "schema": { + "properties": { + "status": { + "type": "string", + "example": "success" + } + } + } + } + } + }, + "400": { + "$ref": "#/components/invalidData" + }, + "401": { + "$ref": "#/components/unauthenticated" + }, + "403": { + "$ref": "#/components/unauthorisedResponse" + } + } } }, - "/modeloutput/{id}/start.json": { + "/modeloutput/{id}/available-experiments": { + "patch": { + "summary": "Extend existing set of experiment associations", + "description": "Links a list of new experiments to model output. User must have access to the workspace of the cloned experiment", + "tags": [ + "Model Outputs" + ], + "security": [ + { + "userId": [], + "authToken": [] + } + ], + "parameters": [ + { + "in": "path", + "name": "id", + "required": true, + "description": "Model output id", + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "required": [ + "experiments" + ], + "properties": { + "experiments": { + "type": "Array", + "description": "List of experiments to append", + "items": { + "type": "string" + } + } + } + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "status": { + "type": "string", + "example": "success" + } + } + } + } + } + }, + "400": { + "$ref": "#/components/invalidData" + }, + "401": { + "$ref": "#/components/unauthenticated" + }, + "403": { + "$ref": "#/components/unauthorisedResponse" + } + } + }, + "delete": { + "summary": "Remove an experiment association from a MO", + "description": "Removes an experiment association from MO. 
Only owners of the MO, or users with admin roles can do so.", + "tags": [ + "Model Outputs" + ], + "security": [ + { + "userId": [], + "authToken": [] + } + ], + "parameters": [ + { + "in": "path", + "name": "id", + "required": true, + "description": "Model output id", + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "object", + "required": [ + "experiment" + ], + "properties": { + "experiment": { + "type": "string" + } + } + } + } + } + }, + "responses": { + "200": { + "description": "Successfuly removed experiment", + "content": { + "application/json": { + "schema": { + "properties": { + "status": { + "type": "string", + "example": "success" + } + } + } + } + } + }, + "400": { + "$ref": "#/components/invalidData" + }, + "401": { + "$ref": "#/components/unauthenticated" + }, + "403": { + "$ref": "#/components/unauthorisedResponse" + } + } + } + }, + "/modeloutput/{id}/{expId}/start.json": { "get": { "description": "Get the JSON in text form that would be sent to the worker to start analysis.", "tags": [ @@ -685,7 +911,7 @@ }, "/modeloutput/{id}/files/{fileId}": { "delete": { - "description": "Delete a specific file from a model output. Requires ownership of the modeloutput - i.e. be the creation user.", + "description": "Delete a specific file from a model output. Can be MO files or from ancillary files. Requires ownership of the modeloutput", "tags": [ "Model Outputs" ], @@ -751,7 +977,7 @@ } } }, - "/modeloutput/{id}/start": { + "/modeloutput/{id}/{expId}/start": { "put": { "description": "Start analysis for the model output", "tags": [ @@ -772,6 +998,15 @@ "schema": { "type": "string" } + }, + { + "in": "path", + "name": "expId", + "required": true, + "description": "Experiment id", + "schema": { + "type": "string" + } } ], "responses": { diff --git a/meorg_client/endpoints.py b/meorg_client/endpoints.py index 3ac8446..16766e8 100644 --- a/meorg_client/endpoints.py +++ b/meorg_client/endpoints.py @@ -14,9 +14,15 @@ FILE_STATUS = "files/status/{id}" # Analysis -ANALYSIS_START = "modeloutput/{id}/start" +ANALYSIS_START = "modeloutput/{id}/{expid}/start" ANALYSIS_STATUS = "analysis/{id}/status" # Model Outputs MODEL_OUTPUT_CREATE = "modeloutput" MODEL_OUTPUT_QUERY = MODEL_OUTPUT_CREATE + +MODEL_OUTPUT_UPDATE = "modeloutput/{id}" +MODEL_OUTPUT_DELETE = MODEL_OUTPUT_UPDATE + +MODEL_OUTPUT_BENCHMARKS = "modeloutput/{id}/{expId}/available-benchmarks" +MODEL_OUTPUT_EXPERIMENTS = "modeloutput/{id}/available-experiments" diff --git a/meorg_client/tests/conftest.py b/meorg_client/tests/conftest.py index 9117906..7bb5258 100644 --- a/meorg_client/tests/conftest.py +++ b/meorg_client/tests/conftest.py @@ -1,36 +1,29 @@ +from typing import Dict import os import pytest +from pytest import StashKey, CollectReport + +phase_report_key = StashKey[Dict[str, CollectReport]]() # Set dev mode os.environ["MEORG_DEV_MODE"] = "1" -class ValueStorage: - def __init__(self): - self.data = dict() - - def get(self, key): - return self.data.get(key, None) - - def set(self, key, value): - self.data[key] = value +# https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_makereport(item, call): + """Have more information on the status for pytests. 
- def __repr__(self): - lines = "" - for k, v in self.data.items(): - lines += f"{k} = {v}\n" - - return lines + The results can be used within fixtures + """ + # execute all other hooks to obtain the report object + rep = yield + # store test results for each phase of a call, which can + # be "setup", "call", "teardown" + item.stash.setdefault(phase_report_key, {})[rep.when] = rep -# Add some things to the store -store = ValueStorage() -store.set("email", os.environ.get("MEORG_EMAIL")) -store.set("password", os.environ.get("MEORG_PASSWORD")) -store.set("model_output_id", os.environ.get("MEORG_MODEL_OUTPUT_ID")) -store.set("experiment_id", os.environ.get("MEORG_EXPERIMENT_ID")) -store.set("model_profile_id", os.environ.get("MEORG_MODEL_PROFILE_ID")) -store.set("model_output_name", os.environ.get("MEORG_MODEL_OUTPUT_NAME")) + return rep @pytest.fixture diff --git a/meorg_client/tests/test_cli.py b/meorg_client/tests/test_cli.py index 0023646..7074ae9 100644 --- a/meorg_client/tests/test_cli.py +++ b/meorg_client/tests/test_cli.py @@ -4,8 +4,8 @@ import meorg_client.cli as cli import os import meorg_client.utilities as mu -from conftest import store import pytest +from conftest import phase_report_key @pytest.fixture @@ -33,15 +33,72 @@ def test_filepath() -> str: @pytest.fixture -def model_output_id() -> str: - """Get the model output ID out of the environment. +def model_output_generator(request, runner: CliRunner, model_profile_id: str): + """A generator function for creating new model outputs before running a test. - Returns + After the test has run, automatically deletes all model outputs created within + the test + + Parameters ------- - str - Model output ID. + request: + Request object by pytest + + click.testing.CliRunner + Runner object. + + model_profile_id + Model profile ID. """ - return os.getenv("MEORG_MODEL_OUTPUT_ID") + model_output_ids = [] + + def _make_model_output(model_output_name): + """Create new model output ID. + + Parameters + ------- + name: + Model output name + """ + # `model_profile_id` from `model_output_generator` + result = runner.invoke( + cli.create_new_model_output, + [model_profile_id, model_output_name], + standalone_mode=False, + ) + assert result.exit_code == 0 + model_output_id = result.return_value + model_output_ids.append(model_output_id) + return model_output_id + + yield _make_model_output + + # If using tests like model_output_delete, where model output is already deleted, + # we don't want to do the teardown process + if hasattr(request.node, "skip_teardown"): + return + + # If test failed, for debugging purposes, we want to keep the model output in + # me.org + report = request.node.stash[phase_report_key] + if report["call"].failed: + print("Call to test failed", request.node.nodeid) + return + + for model_output_id in model_output_ids: + runner.invoke(cli.model_output_delete, [model_output_id]) + + +@pytest.fixture +def model_output_id(model_output_generator: str): + """Generate fresh model output ID. 
+ + Parameters + ---------- + model_output_generator: str + Model output generator function + """ + return model_output_generator("base_model_output") def test_list_endpoints(runner: CliRunner): @@ -56,28 +113,154 @@ def test_list_endpoints(runner: CliRunner): assert result.exit_code == 0 -def test_create_model_output( - runner: CliRunner, model_profile_id, experiment_id, model_output_name +class TestModelOutput: + def test_create_model_output( + self, runner: CliRunner, model_profile_id, model_output_name + ): + """Test Creation of Model output.""" + result = runner.invoke( + cli.create_new_model_output, + [model_profile_id, model_output_name], + standalone_mode=False, + ) + + assert result.exit_code == 0 + model_output_id = result.return_value + assert isinstance(model_output_id, str) # The new model output + + def test_model_output_query(self, runner: CliRunner, model_output_id: str): + """Test Existing Model output.""" + result = runner.invoke( + cli.model_output_query, + [model_output_id], + ) + assert result.exit_code == 0 + + def test_model_output_update( + self, + runner: CliRunner, + model_output_name: str, + model_profile_id: str, + model_output_id: str, + ): + """Test Existing Model output.""" + result = runner.invoke( + cli.model_output_update, + [ + model_output_id, + # model_output_name, + "--model-profile-id", + model_profile_id, + "--state-selection", + "default", + "--parameter-selection", + "automated", + "--is-bundle", + ], + ) + assert result.exit_code == 0 + + def test_model_output_delete( + self, request, runner: CliRunner, model_output_id: str + ): + request.node.skip_teardown = True + result = runner.invoke(cli.model_output_delete, [model_output_id]) + assert result.exit_code == 0 + + +class TestBenchmark: + + # This model_output_id will always have multiple benchmarks + @pytest.fixture + def model_output_id( + self, runner: CliRunner, model_output_generator, experiment_id: str + ): + latest_id = None + for i in range(2): + latest_id = model_output_generator(f"meorg_test_benchmark{i}") + runner.invoke( + cli.model_output_experiments_extend, + [latest_id, experiment_id], + ) + return latest_id + + def _check_available_benchmarks(self, result, expected): + available_benchmarks = result.get("benchmarks") + assert isinstance(available_benchmarks, list) + assert len(available_benchmarks) == expected + return available_benchmarks + + def _check_current_benchmarks(self, result, expected): + current_benchmarks = result.get("current") + assert isinstance(current_benchmarks, list) + assert len(current_benchmarks) == expected + return current_benchmarks + + def test_model_output_benchmarks_list( + self, runner: CliRunner, experiment_id: str, model_output_id: str + ): + result = runner.invoke( + cli.model_output_benchmarks_list, + [model_output_id, experiment_id], + standalone_mode=False, + ) + + assert result.exit_code == 0 + + self._check_available_benchmarks(result.return_value, 1) + self._check_current_benchmarks(result.return_value, 0) + + def test_model_output_benchmarks_replace( + self, runner: CliRunner, experiment_id: str, model_output_id: str + ): + result = runner.invoke( + cli.model_output_benchmarks_list, + [model_output_id, experiment_id], + standalone_mode=False, + ) + available_benchmarks = self._check_available_benchmarks(result.return_value, 1) + self._check_current_benchmarks(result.return_value, 0) + + result = runner.invoke( + cli.model_output_benchmarks_replace, + [ + model_output_id, + experiment_id, + available_benchmarks[0]["id"], + ], + 
standalone_mode=False, + ) + assert result.exit_code == 0 + + result = runner.invoke( + cli.model_output_benchmarks_list, + [model_output_id, experiment_id], + standalone_mode=False, + ) + self._check_available_benchmarks(result.return_value, 0) + self._check_current_benchmarks(result.return_value, 1) + + +def test_model_output_experiments_extend( + runner: CliRunner, model_output_id: str, experiment_id: str ): - """Test Creation of Model output.""" result = runner.invoke( - cli.create_new_model_output, - [model_profile_id, experiment_id, model_output_name], - standalone_mode=False, + cli.model_output_experiments_extend, + [model_output_id, experiment_id], ) - assert result.exit_code == 0 - assert type(result.return_value) is str # The new model output - # Test newly created model_output_id - test_model_output_query(runner, result.return_value) - -def test_model_output_query(runner: CliRunner, model_output_id: str): - """Test Existing Model output.""" +def test_model_output_experiment_delete( + runner: CliRunner, model_output_id: str, experiment_id: str +): + result = runner.invoke( + cli.model_output_experiments_extend, + [model_output_id, experiment_id], + ) result = runner.invoke( - cli.model_output_query, - [model_output_id], + cli.model_output_experiment_delete, + [model_output_id, experiment_id], ) assert result.exit_code == 0 @@ -98,9 +281,6 @@ def test_file_upload(runner: CliRunner, test_filepath: str, model_output_id: str result = runner.invoke(cli.file_upload, [test_filepath, model_output_id]) assert result.exit_code == 0 - # Add the job_id to the store for the next test - store.set("file_id", result.stdout.split()[-1].strip()) - def test_file_multiple(runner: CliRunner, test_filepath: str, model_output_id: str): """Test file-upload via CLI. @@ -118,9 +298,6 @@ def test_file_multiple(runner: CliRunner, test_filepath: str, model_output_id: s ) assert result.exit_code == 0 - # Add the job_id to the store for the next test - store.set("file_ids", result.stdout.strip()) - def test_file_upload_parallel( runner: CliRunner, test_filepath: str, model_output_id: str @@ -141,7 +318,7 @@ def test_file_upload_parallel( assert result.exit_code == 0 -def test_file_list(runner: CliRunner): +def test_file_list(runner: CliRunner, model_output_id: str): """Test file-list via CLI. Parameters @@ -149,11 +326,13 @@ def test_file_list(runner: CliRunner): runner : CliRunner Runner. """ - result = runner.invoke(cli.file_list, [store.get("model_output_id")]) + result = runner.invoke(cli.file_list, [model_output_id]) assert result.exit_code == 0 -def test_delete_file_from_output(runner: CliRunner, model_output_id: str): +def test_delete_file_from_output( + runner: CliRunner, test_filepath: str, model_output_id: str +): """Test deleting a file from a model output. Parameters @@ -163,15 +342,23 @@ def test_delete_file_from_output(runner: CliRunner, model_output_id: str): model_output_id : str Model output ID. 
""" + + result = runner.invoke( + cli.file_upload, [test_filepath, test_filepath, model_output_id] + ) + file_ids = result.output.strip() + # Get the last file added - file_id = store.get("file_ids").splitlines()[-1] + file_id = file_ids.splitlines()[-1] # Delete it - result = runner.invoke(cli.file_delete, [store.get("model_output_id"), file_id]) + result = runner.invoke(cli.file_delete, [model_output_id, file_id]) assert result.exit_code == 0 -def test_delete_all_files_from_output(runner: CliRunner, model_output_id: str): +def test_delete_all_files_from_output( + runner: CliRunner, test_filepath: str, model_output_id: str +): """Test deleting all files from a model output. Parameters @@ -182,5 +369,7 @@ def test_delete_all_files_from_output(runner: CliRunner, model_output_id: str): Model output ID. """ + _ = runner.invoke(cli.file_upload, [test_filepath, test_filepath, model_output_id]) + result = runner.invoke(cli.file_delete_all, [model_output_id]) assert result.exit_code == 0 diff --git a/meorg_client/tests/test_client.py b/meorg_client/tests/test_client.py index 13b92bc..7682dd9 100644 --- a/meorg_client/tests/test_client.py +++ b/meorg_client/tests/test_client.py @@ -4,9 +4,8 @@ import pytest from meorg_client.client import Client import meorg_client.utilities as mu -from conftest import store import tempfile as tf -import time +from conftest import phase_report_key def _get_authenticated_client() -> Client: @@ -37,19 +36,7 @@ def _get_authenticated_client() -> Client: return client -@pytest.fixture -def model_output_id() -> str: - """Get the model output ID. - - Returns - ------- - str - Model output ID. - """ - return os.environ.get("MEORG_MODEL_OUTPUT_ID") - - -@pytest.fixture +@pytest.fixture(scope="module") def client() -> Client: """Get an authenticated client. @@ -93,28 +80,184 @@ def test_list_endpoints(client: Client): assert isinstance(response, dict) -def test_create_model_output( - client: Client, model_profile_id: str, experiment_id: str, model_output_name: str -): - """Test Creation of Model output.""" - response = client.model_output_create( - model_profile_id, experiment_id, model_output_name - ) - assert client.success() +@pytest.fixture +def model_output_generator(request, client: Client, model_profile_id: str): + """A generator function for creating new model outputs before running a test. + + After the test has run, automatically deletes all model outputs created within + the test + + Parameters + ------- + request: + Request object by pytest + + client : Client + Client. + + model_profile_id + Model profile ID. + """ + model_output_ids = [] + + def _make_model_output(model_output_name): + """Create new model output ID. 
- model_output_id = response.get("data").get("modeloutput") - assert model_output_id is not None + Parameters + ------- + name: + Model output name + """ + # `model_profile_id` from `model_output_generator` + response = client.model_output_create(model_profile_id, model_output_name) + model_output_id = response.get("data").get("modeloutput") + model_output_ids.append(model_output_id) + return model_output_id - test_model_output_query(client, model_output_id) + yield _make_model_output + # If using tests like model_output_delete, where model output is already deleted, + # we don't want to do the teardown process + if hasattr(request.node, "skip_teardown"): + return -def test_model_output_query(client: Client, model_output_id: str): - """Test Existing Model output.""" - response = client.model_output_query(model_output_id) + # If test failed, for debugging purposes, we want to keep the model output in + # me.org + report = request.node.stash[phase_report_key] + if report["call"].failed: + print("Call to test failed", request.node.nodeid) + return + + for model_output_id in model_output_ids: + client.model_output_delete(model_output_id) + + +@pytest.fixture +def model_output_id(model_output_generator: str): + """Generate fresh model output ID. + + Parameters + ---------- + model_output_generator: str + Model output generator function + """ + return model_output_generator("base_model_output") + + +class TestModelOutput: + + def test_create_model_output( + self, client: Client, model_profile_id: str, model_output_name: str + ): + """Test Creation of Model output.""" + response = client.model_output_create(model_profile_id, model_output_name) + assert client.success() + + model_output_id = response.get("data").get("modeloutput") + assert model_output_id is not None + + self.test_model_output_query(client, model_output_id) + + def test_model_output_query(self, client: Client, model_output_id: str): + """Test Existing Model output.""" + response = client.model_output_query(model_output_id) + assert client.success() + + response_model_output_data = response.get("data").get("modeloutput") + assert response_model_output_data.get("id") == model_output_id + + def test_model_output_update( + self, + client: Client, + model_output_id: str, + model_profile_id: str, + ): + """Test updation of model output.""" + + update_data = { + "name": "updated_mo_name", + "model": model_profile_id, + "state_selection": "default model initialisation", + "parameter_selection": "automated calibration", + "comments": "updated model output pytest", + "is_bundle": False, + } + _ = client.model_output_update(model_output_id, update_data) + assert client.success() + + def test_model_output_delete(self, request, client: Client, model_output_id: str): + request.node.skip_teardown = True + _ = client.model_output_delete(model_output_id) + assert client.success() + + +class TestBenchmark: + + # This model_output_id will always have multiple benchmarks + @pytest.fixture + def model_output_id( + self, client: Client, model_output_generator, experiment_id: str + ): + latest_id = None + for i in range(2): + latest_id = model_output_generator(f"meorg_test_benchmark{i}") + client.model_output_experiments_extend(latest_id, [experiment_id]) + return latest_id + + def _check_available_benchmarks(self, response, expected): + available_benchmarks = response.get("data").get("benchmarks") + assert isinstance(available_benchmarks, list) + assert len(available_benchmarks) == expected + return available_benchmarks + + def 
_check_current_benchmarks(self, response, expected): + current_benchmarks = response.get("data").get("current") + assert isinstance(current_benchmarks, list) + assert len(current_benchmarks) == expected + return current_benchmarks + + def test_model_output_benchmarks_list( + self, client: Client, model_output_id: str, experiment_id: str + ): + + response = client.model_output_benchmarks_list(model_output_id, experiment_id) + self._check_available_benchmarks(response, 1) + self._check_current_benchmarks(response, 0) + assert client.success() + + def test_model_output_benchmarks_replace( + self, client: Client, model_output_id: str, experiment_id: str + ): + response = client.model_output_benchmarks_list(model_output_id, experiment_id) + available_benchmarks = self._check_available_benchmarks(response, 1) + self._check_current_benchmarks(response, 0) + + client.model_output_benchmarks_replace( + model_output_id, + experiment_id, + [available_benchmarks[0]["id"]], + ) + assert client.success() + + response = client.model_output_benchmarks_list(model_output_id, experiment_id) + self._check_available_benchmarks(response, 0) + self._check_current_benchmarks(response, 1) + + +def test_model_output_experiments_extend( + client: Client, model_output_id: str, experiment_id: str +): + client.model_output_experiments_extend(model_output_id, [experiment_id]) assert client.success() - response_model_output_data = response.get("data").get("modeloutput") - assert response_model_output_data.get("id") == model_output_id + +def test_model_output_experiment_delete( + client: Client, model_output_id: str, experiment_id: str +): + # For now, since fresh model output id + client.model_output_experiments_extend(model_output_id, [experiment_id]) + client.model_output_experiment_delete(model_output_id, experiment_id) + assert client.success() def test_upload_file(client: Client, test_filepath: str, model_output_id: str): @@ -135,9 +278,6 @@ def test_upload_file(client: Client, test_filepath: str, model_output_id: str): # Make sure it worked assert client.success() - # Store the response. - store.set("file_upload", response) - def test_upload_file_multiple(client: Client, test_filepath: str, model_output_id: str): """Test the uploading of multiple files in sequence. @@ -159,9 +299,6 @@ def test_upload_file_multiple(client: Client, test_filepath: str, model_output_i [response.get("data").get("files")[0].get("id") for response in responses] ) - # Store the response. - store.set("file_upload_multiple", responses) - def test_file_list(client: Client, model_output_id: str): """Test the listing of files for a model output. @@ -176,40 +313,55 @@ def test_file_list(client: Client, model_output_id: str): response = client.list_files(model_output_id) assert client.success() assert isinstance(response.get("data").get("files"), list) - store.set("file_list", response) - - -def test_start_analysis(client: Client, model_output_id: str): - """Test starting an analysis. - - Parameters - ---------- - client : Client - Client. - model_output_id : str - Model output ID. - """ - # Wait 5s for data to move from cache to store (otherwise analysis will fail, still might) - time.sleep(5) - response = client.start_analysis(model_output_id) - assert client.success() - - # Store result for status check below - store.set("start_analysis", response) -def test_get_analysis_status(client: Client): - """Test getting the analysis status. - - Parameters - ---------- - client : Client - Client. 
- """ - # Get the analysis id from the store - analysis_id = store.get("start_analysis").get("data").get("analysisId") - _ = client.get_analysis_status(analysis_id) - assert client.success() +class TestAnalysis: + + @pytest.fixture + def model_output_id_analysis( + self, + client: Client, + test_filepath: str, + experiment_id: str, + model_output_generator, + ): + model_output_id = model_output_generator("meorg_test_analysis") + client.model_output_experiments_extend(model_output_id, [experiment_id]) + client.upload_files([test_filepath, test_filepath], model_output_id) + return model_output_id + + @pytest.fixture + def analysis_id( + self, client: Client, model_output_id_analysis: str, experiment_id: str + ): + response = client.start_analysis(model_output_id_analysis, experiment_id) + return response.get("data").get("analysisId") + + def test_start_analysis( + self, client: Client, model_output_id_analysis: str, experiment_id: str + ): + """Test starting an analysis. + + Parameters + ---------- + client : Client + Client. + model_output_id : str + Model output ID. + """ + _ = client.start_analysis(model_output_id_analysis, experiment_id) + assert client.success() + + def test_get_analysis_status(self, client: Client, analysis_id: str): + """Test getting the analysis status. + + Parameters + ---------- + client : Client + Client. + """ + _ = client.get_analysis_status(analysis_id) + assert client.success() @pytest.mark.xfail(strict=False) @@ -292,7 +444,9 @@ def test_upload_file_parallel_no_progress( ) -def test_delete_file_from_model_output(client: Client, model_output_id: str): +def test_delete_file_from_model_output( + client: Client, test_filepath: str, model_output_id: str +): """Test deleting a file from a model output. Parameters @@ -302,12 +456,13 @@ def test_delete_file_from_model_output(client: Client, model_output_id: str): model_output_id : str Model output ID. """ + file_upload = client.upload_files(test_filepath, id=model_output_id)[0] # Retrieve the uploaded file ID from earlier. - file_id = store.get("file_upload").get("data").get("files")[0].get("id") + file_id = file_upload.get("data").get("files")[0].get("id") # Retieve the file list from earlier. - files = store.get("file_list") + files = client.list_files(model_output_id) # Convert to a list of JUST the IDs, none of the extra attributes. file_ids = [f.get("id") for f in files.get("data").get("files")] @@ -328,7 +483,9 @@ def test_delete_file_from_model_output(client: Client, model_output_id: str): assert file_id not in file_ids -def test_delete_all_files_from_model_output(client: Client, model_output_id: str): +def test_delete_all_files_from_model_output( + client: Client, test_filepath: str, model_output_id: str +): """Test deleting all files from a model output. Parameters @@ -339,8 +496,11 @@ def test_delete_all_files_from_model_output(client: Client, model_output_id: str Model output ID. """ + # Upload a list of files + client.upload_files([test_filepath, test_filepath], model_output_id) + # Get the list of files and unpack to list - files = store.get("file_list") + files = client.list_files(model_output_id) file_ids = [f.get("id") for f in files.get("data").get("files")] # Make sure the list is more than 2 items long.
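Supplementary sketch (not part of the patch): how a future test could lean on the `model_output_generator` fixture and the `skip_teardown` escape hatch introduced in `test_client.py` above. Fixture names, client methods, and response keys are taken from this patch; the test bodies and names themselves are illustrative placeholders.

```python
from meorg_client.client import Client


def test_model_output_roundtrip(client: Client, model_output_generator):
    # The generator fixture creates the model output and registers it for teardown.
    model_output_id = model_output_generator("meorg_test_roundtrip")

    response = client.model_output_query(model_output_id)
    assert client.success()
    assert response.get("data").get("modeloutput").get("id") == model_output_id


def test_model_output_explicit_delete(request, client: Client, model_output_generator):
    # Deleting inside the test itself: flag the node so the fixture skips teardown,
    # mirroring TestModelOutput.test_model_output_delete.
    request.node.skip_teardown = True
    model_output_id = model_output_generator("meorg_test_explicit_delete")

    client.model_output_delete(model_output_id)
    assert client.success()
```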