diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 0000000..1acf4fe --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,35 @@ +name: Lint test + +on: [push] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + common-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + - name: Install pre-commit hook + run: | + pip install pre-commit + - name: Linting + run: pre-commit run --all-files + doc-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + - name: Install Nox + run: | + pip install nox + - name: Linting + run: nox -s doc diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 0000000..1b1f902 --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,41 @@ +name: Publish Package +on: + push: + tags: + - 'v*' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + publish: + name: Publish Package + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PAI_PYPI_TOKEN: ${{ secrets.PAI_PYPI_TOKEN }} + ALIPAI_PYPI_TOKEN: ${{ secrets.ALIPAI_PYPI_TOKEN }} + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: '3.8' + - name: Install dependencies + run: pip install wheel setuptools twine + # build and upload package pai + - name: Build package for pai + run: python setup.py sdist bdist_wheel + - name: Publish package to PyPI (pai) + run: twine upload dist/* --skip-existing -u __token__ -p $PAI_PYPI_TOKEN + - name: cleanup + run: | + rm -rf dist + rm -rf build + rm -rf pai.egg-info + # build and upload package alipai + - name: Build package for alipai + run: 
PACKAGE_NAME=alipai python setup.py sdist bdist_wheel + - name: Publish package to PyPI (alipai) + run: twine upload dist/* --skip-existing -u __token__ -p $ALIPAI_PYPI_TOKEN diff --git a/.github/workflows/release_trigger.yaml b/.github/workflows/release_trigger.yaml new file mode 100644 index 0000000..2d6ee91 --- /dev/null +++ b/.github/workflows/release_trigger.yaml @@ -0,0 +1,59 @@ +name: Release Trigger +on: + pull_request: + types: [closed] + branches: + - master + paths: + - 'pai/version.py' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + publish: + name: Release Trigger + runs-on: ubuntu-latest + if: github.event.pull_request.merged == true && startsWith(github.head_ref, 'releases/v') + env: + PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} + PAI_PYPI_TOKEN: ${{ secrets.PAI_PYPI_TOKEN }} + ALIPAI_PYPI_TOKEN: ${{ secrets.ALIPAI_PYPI_TOKEN }} + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: '3.8' + - name: Check version match + id: check_version + run: | + BRANCH_VERSION=${{ github.head_ref }} + BRANCH_VERSION=${BRANCH_VERSION#releases/v} + FILE_VERSION=$(python -c "from pai.version import VERSION; print(VERSION)") + if [[ "$BRANCH_VERSION" != "$FILE_VERSION" ]]; then + echo "Version in branch name ($BRANCH_VERSION) does not match version in file ($FILE_VERSION)" + exit 1 + fi + - name: Get version and create version tag + run: | + VERSION=$(python -c "from pai.version import VERSION; print(VERSION)") + git tag v$VERSION + git push origin v$VERSION +# git tag pushed by GitHub action bot will not trigger another action. 
+ - name: Install dependencies + run: pip install wheel setuptools twine + - name: Build package for pai + run: python setup.py sdist bdist_wheel + - name: Publish package to PyPI (pai) + run: twine upload dist/* --skip-existing -u __token__ -p $PAI_PYPI_TOKEN + - name: cleanup + run: | + rm -rf dist + rm -rf build + rm -rf pai.egg-info + - name: Build package for alipai + run: PACKAGE_NAME=alipai python setup.py sdist bdist_wheel + - name: Publish package to PyPI (alipai) + run: twine upload dist/* --skip-existing -u __token__ -p $ALIPAI_PYPI_TOKEN diff --git a/.github/workflows/unit.yaml b/.github/workflows/unit.yaml new file mode 100644 index 0000000..9d91f5b --- /dev/null +++ b/.github/workflows/unit.yaml @@ -0,0 +1,22 @@ +name: Unit test + +on: [push] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + unit-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + - name: Install Nox + run: | + pip install nox + - name: Unit test + run: nox -s unit diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 89fd287..fe4eb0d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,7 +37,7 @@ repos: - -w - repo: https://github.com/pycqa/isort - rev: 5.10.1 + rev: 5.12.0 hooks: - id: isort name: isort (python) @@ -53,3 +53,8 @@ repos: rev: 0.6.1 hooks: - id: nbstripout + + - repo: https://github.com/gitleaks/gitleaks + rev: v8.16.1 + hooks: + - id: gitleaks diff --git a/README.md b/README.md index 77e2d45..494e2c0 100644 --- a/README.md +++ b/README.md @@ -1,82 +1,119 @@ # PAI Python SDK +[English](./README_EN.md) \| 简体中文 -English \| [简体中文](./README_CN.md) +PAI Python SDK是阿里云 [机器学习平台 PAI(Platform for Artificial Intelligence)](https://www.aliyun.com/product/bigdata/learn) 提供的Python SDK,提供了更易用的HighLevel API,支持机器学习工程师简单地使用Python在PAI完成模型训练和部署,串联机器学习的流程。 -The PAI Python SDK is provided by 
Alibaba Cloud\'s [Platform for Artificial Intelligence (PAI)](https://www.aliyun.com/product/bigdata/learn). It offers a user-friendly High-Level API, enabling machine learning engineers to easily train and deploy models on PAI using Python, streamlining the machine learning workflow. +## 🔧 安装 -## Installation 🔧 - -Install the PAI Python SDK using the following command, which supports Python versions \>= 3.6 (it is recommended to use Python \>= 3.8): +使用以下命令安装PAI Python SDK(支持Python版本 \>= 3.8): ```shell -python -m pip install alipai +python -m pip install pai ``` -## Documentation 📖 +## 📖 文档 -Find detailed documentation, including API references and user guides, in the [docs]{.title-ref} directory or visit [PAI Python SDK Documentation](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/index.html). +请通过访问 [PAI Python SDK文档](https://pai.readthedocs.io/) 或是查看 [docs](./docs) 目录下的文件获取SDK的详细文档,包括用户指南和API文档。 -## Basic Usage 🛠 +## 🛠 使用示例 -- Submit a custom training job +- 提交自定义训练任务 -The following example demonstrates how to submit a custom training job to PAI: +以下代码演示了如何通过SDK提交一个自定义的训练作业: ```python from pai.estimator import Estimator from pai.image import retrieve est = Estimator( - # Retrieve the latest PyTorch image provided by PAI + # 获取PAI提供的最新PyTorch镜像 image_uri=retrieve( framework_name="PyTorch", framework_version="latest" ).image_uri, command="echo hello", - # Optionally, specify the source_dir to upload your training code: + # 可选,指定source_dir上传你的训练代码: # source_dir="./train_src", instance_type="ecs.c6.large", ) - -# Submit the training job +# 提交训练任务 est.fit() - print(est.model_data()) + ``` -- Deploy Large Language Model +- 部署大语言模型 -PAI provides numerous pretrained models that you can easily deploy using the PAI Python SDK: +PAI提供了大量预训练模型,可以使用PAI Python SDK轻松部署: ```python from pai.model import RegisteredModel -# Retrieve the QWen-7b model provided by PAI -qwen_model = RegisteredModel("qwen-7b-chat-lora", model_provider="pai") +# 获取PAI提供的QWen1.5-7b模型 
+qwen_model = RegisteredModel("qwen1.5-7b-chat", model_provider="pai") -# Deploy the model +# 部署模型 p = qwen_model.deploy(service_name="qwen_service") -# Call the service +# 调用服务 p.predict( data={ - "prompt": "How to install PyTorch?", - "system_prompt": "Act like you are programmer with 5+ years of experience.", + "prompt": "What is the purpose of life?", + "system_prompt": "You are helpful assistant.", "temperature": 0.8, } ) + +# PAI提供的大语言模型支持OpenAI API,可以通过openai SDK调用 +openai_client = p.openai() +res = openai_client.chat.completions.create( + model="default", + max_tokens=1024, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the purpose of life?"} + ] +) +print(res.choices[0].message.content) + ``` -For more details, please refer to the [PAI Python SDK Documentation](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/index.html). +- 微调预训练模型 + +通过PAI提供的微调脚本,提交一个模型微调任务 + +```python + +from pai.model import ModelTrainingRecipe + +training_recipe = ModelTrainingRecipe( + model_name="qwen2-0.5b-instruct", + model_provider="pai", + instance_type="ecs.gn6e-c12g1.3xlarge", +) + +training_recipe.train( + inputs={ + # 本地或是阿里云OSS上的数据路径(oss:///path/to/data) + "train": "" + } +) + + +``` + +通过访问PAI提供的示例仓库,可以了解更多使用示例:[pai-examples](https://github.com/aliyun/pai-examples/tree/master/pai-python-sdk) + +## 🤝 贡献代码 -## Contributing 🤝 +我们欢迎为PAI Python SDK贡献代码。请阅读 [CONTRIBUTING](./CONTRIBUTING.md) 文件了解如何为本项目贡献代码。 -Contributions to the PAI Python SDK are welcome. Please read our contribution guidelines in the [CONTRIBUTING](./CONTRIBUTING.md) file. +## 📝 许可证 -## License 📝 +PAI Python SDK是由阿里云开发,并根据Apache许可证(版本2.0)授权使用。 -PAI Python SDK is developed by Alibaba Cloud and licensed under the Apache License (Version 2.0). +## 📬 联系方式 -## Contact 📬 +如需支持或咨询,请在GitHub仓库中提交issue,或通过钉钉群联系我们: -For support or inquiries, please open an issue on the GitHub repository. 
+DingTalkGroup diff --git a/README_CN.md b/README_CN.md deleted file mode 100644 index 3ec96a6..0000000 --- a/README_CN.md +++ /dev/null @@ -1,78 +0,0 @@ -# PAI Python SDK - -[English](./README.md) \| 简体中文 - -PAI Python SDK是阿里云 [机器学习平台 PAI(Platform for Artificial Intelligence)](https://www.aliyun.com/product/bigdata/learn) 提供的Python SDK,提供了更易用的HighLevel API,支持机器学习工程师简单地使用Python在PAI完成模型训练和部署,串联机器学习的流程。 - -## 安装 🔧 - -使用以下命令安装PAI Python SDK(支持Python版本 \>= 3.6,建议使用Python版本 \>= 3.8): - -```shell -python -m pip install alipai -``` - -## 文档 📖 - -请通过访问 [PAI Python SDK文档](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/index.html) 或是查看 [docs](./docs) 目录下的文件获取SDK的详细文档,包括用户指南和API文档。 - -## 使用示例 🛠 - -- 提交自定义训练任务 - -以下代码演示了如何通过SDK提交一个自定义的训练作业: - -```python -from pai.estimator import Estimator -from pai.image import retrieve - -est = Estimator( - # 获取PAI提供的最新PyTorch镜像 - image_uri=retrieve( - framework_name="PyTorch", framework_version="latest" - ).image_uri, - command="echo hello", - # 可选,指定source_dir上传你的训练代码: - # source_dir="./train_src", - instance_type="ecs.c6.large", -) -# 提交训练任务 -est.fit() -print(est.model_data()) - -``` - -- 部署大语言模型 - -PAI提供了大量预训练模型,可以使用PAI Python SDK轻松部署: - -```python -from pai.model import RegisteredModel - -# 获取PAI提供的QWen-7b模型 -qwen_model = RegisteredModel("qwen-7b-chat-lora", model_provider="pai") -# 部署模型 -p = qwen_model.deploy(service_name="qwen_service") -# 调用服务 -p.predict( - data={ - "prompt": "如何安装PyTorch?", - "system_prompt": "表现得像一位有5年以上经验的程序员。", - "temperature": 0.8, - } -) -``` - -更多功能介绍,请参阅 [PAI Python SDK文档](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/index.html)。 - -## 贡献代码 🤝 - -我们欢迎为PAI Python SDK贡献代码。请阅读 [CONTRIBUTING](./CONTRIBUTING.md) 文件了解如何为本项目贡献代码。 - -## 许可证 📝 - -PAI Python SDK是由阿里云开发,并根据Apache许可证(版本2.0)授权使用。 - -## 联系方式 📬 - -如需支持或咨询,请在GitHub仓库中提Issue。 diff --git a/README_EN.md b/README_EN.md new file mode 100644 index 0000000..4268bcb --- /dev/null +++ b/README_EN.md @@ -0,0 +1,129 @@ +# PAI Python SDK + + 
+English \| [简体中文](./README.md) + +The PAI Python SDK is provided by Alibaba Cloud\'s [Platform for Artificial Intelligence (PAI)](https://www.aliyun.com/product/bigdata/learn). It offers a user-friendly High-Level API, enabling machine learning engineers to easily train and deploy models on PAI using Python, streamlining the machine learning workflow. + +## Installation 🔧 + +Install the PAI Python SDK using the following command, which supports Python versions \>= 3.8 : + +```shell +python -m pip install pai +``` + +## 📖 Documentation + +Find detailed documentation, including API references and user guides, in the [docs](./docs/) directory or visit [PAI Python SDK Documentation](https://pai.readthedocs.io/). + +## 🛠 Basic Usage + +- Submit a custom training job + +The following example demonstrates how to submit a custom training job to PAI: + +```python +from pai.estimator import Estimator +from pai.image import retrieve + +est = Estimator( + # Retrieve the latest PyTorch image provided by PAI + image_uri=retrieve( + framework_name="PyTorch", framework_version="latest" + ).image_uri, + command="echo hello", + # Optionally, specify the source_dir to upload your training code: + # source_dir="./train_src", + instance_type="ecs.c6.large", +) + +# Submit the training job +est.fit() + +print(est.model_data()) +``` + +- Deploy Large Language Model + +PAI provides numerous pretrained models that you can easily deploy using the PAI Python SDK: + +```python +from pai.model import RegisteredModel + +# Retrieve the QWen1.5-7b model provided by PAI +qwen_model = RegisteredModel("qwen1.5-7b-chat", model_provider="pai") + +# Deploy the model +p = qwen_model.deploy(service_name="qwen_service") + +# Call the service +p.predict( + data={ + "prompt": "How to install PyTorch?", + "system_prompt": "You are helpful assistant.", + "temperature": 0.8, + } +) + +# Call the LLM service with openai SDK. 
+openai_client = p.openai() +res = openai_client.chat.completions.create( + model="default", + max_tokens=1024, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the purpose of life?"} + ] +) +print(res.choices[0].message.content) + + +``` + +- Fine-tune the pretrained model + +Submit a model fine-tuning task using the fine-tuning script provided by PAI. + +```python + +from pai.model import ModelTrainingRecipe + +# Retrieve the Qwen2-0.5b-instruct model training recipe provided by PAI +training_recipe = ModelTrainingRecipe( + model_name="qwen2-0.5b-instruct", + model_provider="pai", + instance_type="ecs.gn6e-c12g1.3xlarge", +) + +# Submit the training job +job = training_recipe.train( + inputs={ + # Data path on local or Alibaba Cloud OSS (oss:///path/to/data) + "train": "" + } +) + +# Get output model path +print(training_recipe.model_data()) + +# Deploy the fine-tuned model +predictor = training_recipe.deploy(service_name="qwen2_finetune") + +``` + +You can learn more usage examples by visiting the PAI example repository: [pai-examples](https://github.com/aliyun/pai-examples/tree/master/pai-python-sdk) + +## 🤝 Contributing + +Contributions to the PAI Python SDK are welcome. Please read our contribution guidelines in the [CONTRIBUTING](./CONTRIBUTING.md) file. + +## 📝 License + +PAI Python SDK is developed by Alibaba Cloud and licensed under the Apache License (Version 2.0). 
+ +## 📬 Contact + +For support or inquiries, please open an issue on the GitHub repository or contact us in the DingTalk group: + +DingTalkGroup diff --git a/assets/dingtalk-group.png b/assets/dingtalk-group.png new file mode 100644 index 0000000..012aed3 Binary files /dev/null and b/assets/dingtalk-group.png differ diff --git a/assets/logo.svg b/assets/logo.svg new file mode 100644 index 0000000..422f09a --- /dev/null +++ b/assets/logo.svg @@ -0,0 +1 @@ + diff --git a/docs/source/api/estimator.rst b/docs/source/api/estimator.rst index a87f7d1..ff70670 100644 --- a/docs/source/api/estimator.rst +++ b/docs/source/api/estimator.rst @@ -9,7 +9,7 @@ Estimator :members: :show-inheritance: -.. autoclass:: pai.estimator.UserVpcConfig +.. autoclass:: pai.common.configs.UserVpcConfig :members: :show-inheritance: diff --git a/docs/source/api/processor.rst b/docs/source/api/processor.rst new file mode 100644 index 0000000..64d7747 --- /dev/null +++ b/docs/source/api/processor.rst @@ -0,0 +1,6 @@ +Processor +--------- + +.. autoclass:: pai.processor.Processor + :members: + :show-inheritance: diff --git a/docs/source/conf.py b/docs/source/conf.py index 915cfd5..ecdcf76 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -17,6 +17,7 @@ doc_root_dir = dirname(dirname(os.path.abspath(__file__))) sys.path.insert(0, dirname(doc_root_dir)) +pkg_root = dirname(doc_root_dir) import pai @@ -27,7 +28,8 @@ author = "Alibaba Cloud" # The full version, including alpha/beta/rc tags -release = pai.__version__ +release = pai.version.VERSION +version = pai.version.VERSION # -- General configuration --------------------------------------------------- @@ -37,7 +39,6 @@ # ones. 
extensions = [ "sphinx.ext.autodoc", - # "sphinx.ext.viewcode", "sphinx.ext.napoleon", "sphinx_copybutton", "sphinx_markdown_builder", diff --git a/docs/source/index.rst b/docs/source/index.rst index 154a6a6..1ba224f 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -27,16 +27,7 @@ PAI Python SDK 文档 user-guide/train user-guide/inference user-guide/pretrained-model - - -.. toctree:: - :maxdepth: 1 - :caption: 示例教程 - - tutorial/framework - tutorial/train - tutorial/predict - tutorial/advance + user-guide/processing-job .. toctree:: diff --git a/docs/source/quick-tour/installation.rst b/docs/source/quick-tour/installation.rst index b48464e..b160c7a 100644 --- a/docs/source/quick-tour/installation.rst +++ b/docs/source/quick-tour/installation.rst @@ -5,11 +5,11 @@ 安装 ------ -请通过以下命令安装PAI Python SDK(请使用Python>=3.6)。 +请通过以下命令安装PAI Python SDK(请使用Python>=3.8)。 .. parsed-literal:: - python -m pip install alipai + python -m pip install pai 前提条件 diff --git a/docs/source/reference.rst b/docs/source/reference.rst index b4cb15f..0bdd211 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -5,6 +5,7 @@ API 文档 :maxdepth: 1 api/estimator + api/processor api/model api/image api/predictor diff --git a/docs/source/tutorial/.gitignore b/docs/source/tutorial/.gitignore deleted file mode 100644 index 879843a..0000000 --- a/docs/source/tutorial/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -test_data -train_data -xgb_src -train_src -infer_src -tf_train_src -fashion-mnist -data -bert diff --git a/docs/source/tutorial/advance.rst b/docs/source/tutorial/advance.rst deleted file mode 100644 index fbeebce..0000000 --- a/docs/source/tutorial/advance.rst +++ /dev/null @@ -1,10 +0,0 @@ -=========================================== -AIGC && LLM -=========================================== - -.. 
toctree:: - :maxdepth: 1 - - stable_diffusion_lora/stable_diffusion_lora - chatglm2_finetune/chatglm2_finetune - baichuan2_finetune/baichuan2_finetune diff --git a/docs/source/tutorial/async_inference/async_inference.ipynb b/docs/source/tutorial/async_inference/async_inference.ipynb deleted file mode 100644 index b28c169..0000000 --- a/docs/source/tutorial/async_inference/async_inference.ipynb +++ /dev/null @@ -1,464 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 部署异步推理服务\n", - "\n", - "在复杂的模型推理场景中,例如AIGC、视频处理等场景中,模型服务推理耗时较长,存在长连接超时导致请求失败或实例负载不均衡等问题,不适用于实时推理的场景。针对以上问题,PAI提供了异步推理服务,用于支持类似的场景,用户可以在提交预测请求之后,通过轮询或是订阅的方式获取到推理服务的预测结果。\n", - "\n", - "在当前文档中,我们将介绍如何使用PAI Python SDK在PAI上部署和调用异步推理服务。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 准备工作\n", - "\n", - "我们可以通过以下命令安装PAI Python SDK。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI Python SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 部署异步推理服务模型\n", - "\n", - 
"将模型部署为异步推理服务与部署标准的在线推理服务类似,用户仅需在部署时(`Model.deploy`),传递`service_type=ServicType.Async`即可。\n", - "\n", - "当前流程中,我们将使用镜像部署的模式,部署一个异步的推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# 准备异步推理服务的应用代码目录\n", - "!mkdir -p serve_src/" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过`%%writefile`指令,我们将推理服务代码写入到`serve_src/run.py`文件中。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile serve_src/run.py\n", - "import asyncio\n", - "from random import random\n", - "\n", - "from fastapi import FastAPI, Request\n", - "import uvicorn, json, datetime\n", - "\n", - "# 默认模型加载路径\n", - "model_path = \"/eas/workspace/model/\"\n", - "\n", - "app = FastAPI()\n", - "\n", - "\n", - "@app.post(\"/\")\n", - "async def create_item(request: Request):\n", - " print(\"Make mock prediction starting ...\")\n", - " # Mock prediction\n", - " await asyncio.sleep(15)\n", - " print(\"Prediction finished.\")\n", - " return [random() for _ in range(10)]\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " uvicorn.run(app, host=\"0.0.0.0\", port=8000, workers=1)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将使用PAI提供的PyTorch推理镜像部署以上的模型。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model, container_serving_spec, ServiceType\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "m = Model(\n", - " inference_spec=container_serving_spec(\n", - " source_dir=\"serve_src\",\n", - " command=\"python run.py\",\n", - " image_uri=retrieve(\n", - " \"PyTorch\",\n", - " framework_version=\"1.10\",\n", - " accelerator_type=\"gpu\",\n", - " image_scope=ImageScope.INFERENCE,\n", - " ),\n", - " requirements=[\n", - " \"fastapi\",\n", - " \"uvicorn\",\n", - " 
],\n", - " )\n", - " # 用户可以通过`model_data`参数,传递一个OSS上的模型。相应的模型会被加载到推理服务的容器中。\n", - " # model_data=\"oss:///path/to/model/\"\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过设置部署服务的`service_type=ServiceType.Async`参数,我们可以将模型部署为异步推理服务。异步推理服务使用分别使用输入队列(source)和输出队列(sink)保存预测请求和预测结果。通过`options`参数,可以配置队列使用的资源,队列最大长度,是否开启自动驱逐等高阶参数。异步服务支持的完整的高阶参数,请参考文档:[异步服务-参数配置](https://help.aliyun.com/document_detail/476812.html?#section-gor-gne-gtq)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import AsyncPredictor\n", - "from pai.common.utils import random_str\n", - "\n", - "\n", - "service_name = f\"async_service_example_{random_str(6)}\"\n", - "\n", - "p: AsyncPredictor = m.deploy(\n", - " service_name=service_name,\n", - " instance_type=\"ecs.c6.large\",\n", - " # 设置当前部署的服务类型为异步服务\n", - " service_type=ServiceType.Async,\n", - " # 用户可以通过options字段配置高阶参数\n", - " options={\n", - " # 异步推理详细参数文档: https://help.aliyun.com/document_detail/476812.html\n", - " \"queue.cpu\": 2, # 队列使用的CPU核数,默认为1\n", - " \"queue.memory\": 2048, # 异步服务使用过的队列内存,单位为MB\n", - " },\n", - ")\n", - "\n", - "print()\n", - "\n", - "print(p)\n", - "print(p.service_name)\n", - "print(p.access_token)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 调用推理服务\n", - "\n", - "用户发送调用异步队列服务与请求同步推理服务的方式相同,但是异步推理服务会立即返回本次预测请求的`RequestId`,而不是预测结果。用户可以通过轮询获取到推理服务的预测结果。\n", - "\n", - "- **用户客户端**发送推理请求,加入到推理服务的输入队列中,PAI-EAS返回请求的RequestId。\n", - "- PAI处理输入队列中的请求,转发给到**用户的推理服务**,推理服务处理完请求后,将结果写入到输出队列中\n", - "- **用户客户端**可以通过RequestId轮询,可以获取到**用户推理服务**的预测结果\n", - "\n", - "\n", - "PAI Python SDK提供了`AsyncPredictor`,支持用户更加简单得调用异步推理服务。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 调用异步推理服务\n", - "\n", - 
"`AsyncPredictor`提供了`predict`和`raw_predict`方法发送预测请求,它们都会返回一个`AsyncTask`,用户可以通过`AsyncTask.result()`获取预测结果。 \n", - "\n", - "二者的区别在于`predict`方法会使用`Serializer`对象对输入数据进行序列化,对预测结果进行反序列化,而`raw_predict`方法直接将输入数据发送给异步推理服务,返回HTTP响应结果(`RawResponse`)。\n", - "\n", - "```python\n", - "\n", - "from pai.predictor import AsyncPredictor, AsyncTask\n", - "from pai.serializer import JsonSerializer\n", - "\n", - "p = AsyncPredictor(service_name='test_async_service', serializer=JsonSerializer())\n", - "\n", - "t1: AsyncTask = p.predict(data={\"some\": \"data\"})\n", - "# result是推理服务的响应结果(Response Body),经过Serialzier.deserialize处理后返回的结果.\n", - "result = t1.result()\n", - "\n", - "\n", - "t2: AsyncTask = p.raw_predict(data=b'{\"some\": \"data\"}')\n", - "resp: RawResponse = t2.result()\n", - "print(resp.status_code, resp.content)\n", - "\n", - "```\n", - "\n", - "`AsyncPredictor`会维护一个线程池,通过一个线程去发送推理请求,并等待请求处理完成。用户可以通过`max_workers`参数配置线程池的大小。\n", - "\n", - "```python\n", - "\n", - "p = AsyncPredictor(service_name='test_async_service', max_workers=20)\n", - "\n", - "```\n", - "\n", - "当用户需要在异步请求完成之后,对于响应的结果进行处理时,可以通过`callback`参数传递一个回调函数。回调函数的参数为`AsyncTask.result()`,也就实际响应的结果。\n", - "\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "以下的示例代码中,我们将使用`AsyncPredictor`调用异步推理服务,并通过会回调函数处理预测结果。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import RawResponse, AsyncTask\n", - "import time\n", - "\n", - "# 结果列表\n", - "results = []\n", - "\n", - "\n", - "# 定义回调函数\n", - "def callback_fn(resp: RawResponse):\n", - " print(\"Callback: get prediction result \", resp.json())\n", - " results.append(resp.json())\n", - "\n", - "\n", - "# 发送预测请求,使用回调函数处理预测结果。\n", - "task: AsyncTask = p.raw_predict(\n", - " data=b\"test_data\",\n", - " callback=callback_fn,\n", - ")\n", - "\n", - "# result() 方法等待预测完成\n", - "resp: RawResponse = task.result()\n", - "print(resp.json())\n", 
- "\n", - "# 等待回调函数执行完成\n", - "time.sleep(1)\n", - "\n", - "print(results)\n", - "assert len(results) == 1" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "以下示例中,我们批量发送异步推理请求,然后等待所有的请求完成。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tasks = []\n", - "\n", - "for i in range(10):\n", - " task: AsyncTask = p.raw_predict(\n", - " data=b\"test_data\",\n", - " callback=lambda x: print(\"Prediction result: \", x.json()),\n", - " )\n", - " tasks.append(task)\n", - "\n", - "prediction_results = [t.result().json() for t in tasks]\n", - "\n", - "print(prediction_results)\n", - "print(len(prediction_results))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 使用异步API调用推理服务\n", - "\n", - "`AsyncPredictor` 提供了异步API `raw_predict_async` 和 `predict_async`,支持用户使用Python提供的异步框架(asyncio)调用推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import RawResponse\n", - "\n", - "# 使用异步API调用异步推理服务\n", - "res: RawResponse = await p.raw_predict_async(data=b\"test_data\")\n", - "\n", - "print(res.status_code)\n", - "print(res.content)\n", - "print(res.json())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过SDK提供的异步API,我们可以不借助于线程池,批量发送异步预测请求。\n", - "\n", - "以下的示例中,我们将使用异步API,批量发送异步预测请求,等待推理完成,并使用回调函数打印预测请求结果。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import asyncio\n", - "\n", - "\n", - "# 定义回调函数\n", - "def task_done_cb(task: asyncio.Task):\n", - " if task.exception():\n", - " raise task.exception()\n", - " else:\n", - " print(\"Prediction result: \", task.result().json())\n", - "\n", - "\n", - "# 使用异步API批量调用异步推理服务\n", - "async def batch_predict():\n", - " tasks = []\n", - " for _ in range(10):\n", - " task = 
asyncio.create_task(\n", - " # raw_predict_async 是一个coroutine\n", - " p.raw_predict_async(\n", - " data=b\"test_data\",\n", - " )\n", - " )\n", - " # 调用完成之后,打印调用返回结果\n", - " task.add_done_callback(task_done_cb)\n", - "\n", - " tasks.append(task)\n", - " # 等待所有任务完成\n", - " return await asyncio.gather(*tasks, return_exceptions=True)\n", - "\n", - "\n", - "batch_results = await batch_predict()\n", - "\n", - "\n", - "for result in batch_results:\n", - " print(result.json())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "测试完成之后,可以使用`delete_service`方法删除对应服务,释放资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "pai-dev-py38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/baichuan2_finetune/.gitignore b/docs/source/tutorial/baichuan2_finetune/.gitignore deleted file mode 100644 index f23b395..0000000 --- a/docs/source/tutorial/baichuan2_finetune/.gitignore +++ /dev/null @@ -1 +0,0 @@ -swift diff --git a/docs/source/tutorial/baichuan2_finetune/baichuan2_finetune.ipynb b/docs/source/tutorial/baichuan2_finetune/baichuan2_finetune.ipynb deleted file mode 100644 index d7b2576..0000000 --- a/docs/source/tutorial/baichuan2_finetune/baichuan2_finetune.ipynb +++ /dev/null @@ -1,292 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用ModelScope Swift微调Baichuan2模型" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 介绍\n", - "\n", - "[Baichuan 
2](https://github.com/baichuan-inc/Baichuan2)是[百川智能](https://www.baichuan-ai.com/home)推出的开源大语言模型,采用2.6万亿Tokens的高质量语料进行训练,在多个权威的中文、英文和多语言的通用、领域benchmark上取得了同尺寸最佳的效果。`Baichuan2` 目前发布了7B、13B的Base和Chat版本,支持模型商用。\n", - "\n", - "当在特定领域使用大语言模型时,可以通过prompt的方式引导模型,也可以通过在领域数据集上微调训练,从而在领域的任务上获得更好的效果。后者的优点是不依赖于Prompt(可能超过模型的输入长度上限),有更好的推理性能,并且经过微调后,在领域相关任务上有更好的效果。\n", - "\n", - "本文将介绍如何在PAI对`Baichuan2`模型完成微调训练。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 准备训练脚本\n", - "\n", - "`ModelScope`提供了[SWIFT(Scalable lightWeight Infrastructure for Fine-Tuning)](https://github.com/modelscope/swift#swiftscalable-lightweight-infrastructure-for-fine-tuning)框架,支持模型的全参数微调,也集成了各种高效微调方法,例如`LoRA`、`QLoRA`等,支持用户对`Baichuan2`、`QWen`、`llama2`等常见的语言进行微调训练。\n", - "\n", - "基于[Swift的LLM finetune脚本](https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/src/llm_sft.py),我们修改了部分逻辑,从而支持用户在PAI的训练作业中使用,主要包括:\n", - "\n", - "- 
使用PAI预置的`Baichuan2-Base`模型\n", - "\n", - "对于热门的社区模型,PAI提供了模型缓存在OSS Bucket上,支持挂载到训练作业,训练脚本可以通过读取本地文件的方式加载获取模型。\n", - "\n", - "- 保存模型\n", - "\n", - "训练脚本需要将模型保存到指定路径(`/ml/output/model`),从而将模型保存到用户的OSS Bucket中。\n", - "\n", - "- 训练依赖的第三方\n", - "\n", - "训练作业将运行在PAI提供的`PyTorch`基础镜像上,我们需要在作业环境中安装`transformers`、`datasets`、`swift`、`xformers`等第三方依赖。PAI训练作业支持使用训练脚本目录下的`requirements.txt`安装第三方依赖。\n", - "\n", - "\n", - "完整的训练脚本请参考 `train_src` 目录下的训练文件(`llm_sft.py`)。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 提交训练作业\n", - "\n", - "使用提交任务的方式训练模型,能够支持用户并行运行多个训练任务,高效得探索不同的超参组合对于模型性能影响,并且能够支持分布式训练。通过PAI Python SDK提供的`Estimator`API,我们可以方便得将一个本地训练脚本提交到PAI上运行。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "我们将通过以下代码配置训练作业脚本、作业启动命令、使用的作业镜像,以及机器实例规格,提交训练作业。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.image import retrieve\n", - "from pai.estimator import Estimator\n", - "\n", - "# 训练作业启动命令\n", - "# 完整的参数说明请参考文档:https://github.com/modelscope/swift/blob/main/examples/pytorch/llm/README_CN.md#sftsh-%E5%91%BD%E4%BB%A4%E8%A1%8C%E5%8F%82%E6%95%B0\n", - "command = \"\"\"CUDA_VISIBLE_DEVICES=0 \\\n", - "python llm_sft.py \\\n", - " --model_type baichuan2-7b \\\n", - " --sft_type lora \\\n", - " --template_type default-generation \\\n", - " --dtype fp16 \\\n", - " --output_dir /ml/output/model/ \\\n", - " --dataset advertise-gen \\\n", - " --train_dataset_sample 20000 \\\n", - " --num_train_epochs 1 \\\n", - " --max_length 2048 \\\n", - " --quantization_bit 4 \\\n", - " --lora_rank 8 \\\n", - " --lora_alpha 32 \\\n", - " --lora_dropout_p 0. \\\n", - " --lora_target_modules ALL \\\n", - " --gradient_checkpointing true \\\n", - " --batch_size 16 \\\n", - " --weight_decay 0. 
\\\n", - " --learning_rate 1e-4 \\\n", - " --gradient_accumulation_steps 4 \\\n", - " --max_grad_norm 0.5 \\\n", - " --warmup_ratio 0.03 \\\n", - " --eval_steps 100 \\\n", - " --save_steps 100 \\\n", - " --save_total_limit 2 \\\n", - " --logging_steps 10\n", - "\"\"\"\n", - "\n", - "\n", - "# 配置训练作业\n", - "est = Estimator(\n", - " source_dir=\"train_src/\", # 代码目录\n", - " image_uri=retrieve(\"PyTorch\", framework_version=\"latest\").image_uri, # 训练作业使用的镜像\n", - " command=command, # 训练启动命令\n", - " instance_type=\"ecs.gn6e-c12g1.3xlarge\", # 使用的机器规格示例,V100(32G)\n", - " instance_count=1, # 机器实例个数\n", - " base_job_name=\"baichuan2_finetune\", # 训练作业名称\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "PAI提供了预置的`Baichuan2-Base`模型,可以通过以下方式获取对应的模型`OSS Bucket`路径。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import RegisteredModel\n", - "\n", - "# 获取PAI提供的Baichuan2-7B-Base模型\n", - "m = RegisteredModel(\n", - " model_name=\"baichuan-inc/Baichuan2-7B-Base\", model_provider=\"huggingface\"\n", - ")\n", - "\n", - "# 模型地址\n", - "print(m.model_data)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "提交训练作业,等待作业完成。用户可以通过打印的作业详情页URL,查看训练作业进度,资源使用,日志等信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "notebookRunGroups": { - "groupValue": "2" - } - }, - "outputs": [], - "source": [ - "# 提交训练作业\n", - "est.fit(\n", - " inputs={\n", - " # 训练代码可以从 /ml/input/data/pretrained_model/ 目录下读取挂载的预训练模型\n", - " \"pretrained_model\": m.model_data,\n", - " },\n", - " wait=False, # 是否等待训练作业完成\n", - ")\n", - "\n", - "# 打开一个TensorBoard,监控训练作业\n", - "tb = est.tensorboard()\n", - "\n", - "\n", - "# 等待训练作业完成\n", - "est.wait()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练作业写出到 `/ml/output/model` 目录下的模型文件和checkpoints将被保存到用户的OSS Bucket中,可以通过 `est.model_data()` 获取 OSS Bucket路径。" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 查看数据模型的OSS Bucket路径\n", - "print(est.model_data())\n", - "\n", - "\n", - "# 删除启动的TensorBoard(每一个账号下最多能够启动5个TensorBoard示例)\n", - "tb.delete()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 结语\n", - "\n", - "在当前示例中,我们展示了如何基于`ModelScope Swift`框架,使用PAI预置的`Baichuan2-Base`模型,完成`Baichuan2`模型的微调训练。用户可以参考以上的示例,修改脚本,使用用户自定义的数据集,或是修改使用的基础预训练模型,完成自定义语言模型的微调训练。\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "pai-dev-py38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/chatglm2_finetune/chatglm2_finetune.ipynb b/docs/source/tutorial/chatglm2_finetune/chatglm2_finetune.ipynb deleted file mode 100644 index db4cd46..0000000 --- a/docs/source/tutorial/chatglm2_finetune/chatglm2_finetune.ipynb +++ /dev/null @@ -1,920 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 微调和部署对话模型ChatGLM2-6B\n", - "\n", - "[ChatGLM2-6B](https://www.modelscope.cn/models/ZhipuAI/chatglm2-6b/summary)是中英文对话模型[ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) 的第二代版本,在保留了初代模型对话流畅、部署门槛较低等众多优秀特性的基础之上,ChatGLM2-6B 引入了多项升级,包括更强大的性能、更长的上下文、更高效的推理。\n", - "\n", - "在本示例中,我们将展示:\n", - "\n", - "- 将ChatGLM2-6B部署到PAI创建推理服务,基于推理服务API和Gradio实现一个简易对话机器人。\n", - "\n", - "- 在PAI对ChatGLM2-6B进行微调训练,并将微调的模型部署创建推理服务。\n", - "\n", - "\n", - "## 准备工作\n", - "\n", - "### 前提条件\n", - "\n", - "- 已获取阿里云账号的鉴权AccessKey ID和AccessKey Secret,详情请参见:[获取AccessKey](https://help.aliyun.com/document_detail/116401.html)。\n", - "- 已创建或是加入一个PAI 
AI工作空间,详情请参见:[创建工作空间](https://help.aliyun.com/document_detail/326193.html)。\n", - "- 已创建OSS Bucket,详情请参见:[控制台创建存储空间](https://help.aliyun.com/document_detail/31885.html)。\n", - "\n", - "\n", - "### 安装和配置PAI Python SDK\n", - "\n", - "我们将使用PAI提供的Python SDK,提交训练作业,部署模型。可以通过以下命令安装PAI Python SDK。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai\n", - "!python -m pip install gradio" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI Python SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 直接部署ChatGLM2\n", - "\n", - "`ChatGLM2-6B`是一个对话语言模型,能够基于历史对话信息,和用户的Prompt输入,进行反馈。通过HuggingFace的transformers库用户可以直接使用`ChatGLM2-6B`提供的对话能力,示例如下:\n", - "\n", - "```python\n", - "\n", - ">>> from transformers import AutoTokenizer, AutoModel\n", - ">>> tokenizer = AutoTokenizer.from_pretrained(\"THUDM/chatglm2-6b\", trust_remote_code=True)\n", - ">>> model = AutoModel.from_pretrained(\"THUDM/chatglm2-6b\", trust_remote_code=True).half().cuda()\n", - ">>> model = model.eval()\n", - ">>> response, history = model.chat(tokenizer, \"你好\", history=[])\n", - ">>> print(response)\n", - "你好👋!我是人工智能助手 ChatGLM2-6B,很高兴见到你,欢迎问我任何问题。\n", - ">>> response, history = model.chat(tokenizer, \"晚上睡不着应该怎么办\", 
history=history)\n", - ">>> print(response)\n", - "晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法:\n", - "\n", - "1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。\n", - "2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。\n", - "3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。\n", - "4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。\n", - "5. 避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。\n", - "6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。\n", - "\n", - "如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。\n", - "\n", - "\n", - "\n", - "```\n", - "\n", - "以下的流程中,我们将`ChatGLM2-6B`部署到PAI创建一个推理服务,然后基于推理服务的API,使用Gradio创建一个对话机器人。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### 获取ChatGLM2模型\n", - "\n", - "推理服务和训练作业中都需要加载使用模型,PAI在部分region上提供模型缓存,支持用户能够更快地获取到相应的模型。用户可以通过以下代码获取相应的模型,然后在训练作业和推理服务中加载使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import RegisteredModel\n", - "\n", - "m = RegisteredModel(\n", - " \"THUDM/chatglm2-6b\",\n", - " model_provider=\"huggingface\",\n", - ")\n", - "\n", - "model_uri = m.model_data\n", - "print(model_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 创建推理服务\n", - "\n", - "PAI-EAS是阿里云PAI提供模型在线服务平台,支持用户一键部署推理服务或是AIWeb应用,支持异构资源,弹性扩缩容。PAI-EAS支持使用镜像的方式部署模型,以下的流程,我们将使用PAI提供的PyTorch推理镜像,将以上的模型部署为推理服务。\n", - "\n", - "\n", - "在部署推理服务之前,我们需要准备相应的推理服务程序,他负责加载模型,提供对应的HTTP API服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p server_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "完整的推理服务代码如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile server_src/run.py\n", - "# source: 
https://github.com/THUDM/ChatGLM-6B/blob/main/api.py\n", - "\n", - "import os\n", - "\n", - "from fastapi import FastAPI, Request\n", - "from transformers import AutoTokenizer, AutoModel, AutoConfig\n", - "import uvicorn, json, datetime\n", - "import torch\n", - "\n", - "\n", - "model = None\n", - "tokenizer = None\n", - "\n", - "# 默认的模型保存路径\n", - "chatglm_model_path = \"/eas/workspace/model/\"\n", - "# ptuning checkpoints保存路径\n", - "ptuning_checkpoint = \"/ml/ptuning_checkpoints/\"\n", - "pre_seq_len = 128\n", - "app = FastAPI()\n", - "\n", - "\n", - "def load_model():\n", - " global model, tokenizer\n", - " tokenizer = AutoTokenizer.from_pretrained(chatglm_model_path, trust_remote_code=True)\n", - "\n", - " if os.path.exists(ptuning_checkpoint):\n", - " # P-tuning v2\n", - " print(f\"Loading model/ptuning_checkpoint weight...\")\n", - " config = AutoConfig.from_pretrained(chatglm_model_path, trust_remote_code=True)\n", - " config.pre_seq_len = pre_seq_len\n", - " config.prefix_projection = False\n", - "\n", - " model = AutoModel.from_pretrained(chatglm_model_path, config=config, trust_remote_code=True)\n", - " tokenizer = AutoTokenizer.from_pretrained(chatglm_model_path, trust_remote_code=True)\n", - " prefix_state_dict = torch.load(os.path.join(ptuning_checkpoint, \"pytorch_model.bin\"))\n", - " new_prefix_state_dict = {}\n", - " for k, v in prefix_state_dict.items():\n", - " if k.startswith(\"transformer.prefix_encoder.\"):\n", - " new_prefix_state_dict[k[len(\"transformer.prefix_encoder.\"):]] = v\n", - " model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)\n", - "\n", - " model = model.half().cuda()\n", - " model.transformer.prefix_encoder.float().cuda()\n", - " model.eval()\n", - " else:\n", - " print(f\"Loading model weight...\")\n", - " model = AutoModel.from_pretrained(chatglm_model_path, trust_remote_code=True)\n", - " model.half().cuda()\n", - " model.eval()\n", - "\n", - "\n", - "\n", - "@app.post(\"/\")\n", - "async def 
create_item(request: Request):\n", - " global model, tokenizer\n", - " json_post_raw = await request.json()\n", - " json_post = json.dumps(json_post_raw)\n", - " json_post_list = json.loads(json_post)\n", - " prompt = json_post_list.get('prompt')\n", - " history = json_post_list.get('history')\n", - " max_length = json_post_list.get('max_length')\n", - " top_p = json_post_list.get('top_p')\n", - " temperature = json_post_list.get('temperature')\n", - " response, history = model.chat(tokenizer,\n", - " prompt,\n", - " history=history,\n", - " max_length=max_length if max_length else 2048,\n", - " top_p=top_p if top_p else 0.7,\n", - " temperature=temperature if temperature else 0.95)\n", - " now = datetime.datetime.now()\n", - " time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n", - " answer = {\n", - " \"response\": response,\n", - " \"history\": history,\n", - " \"status\": 200,\n", - " \"time\": time\n", - " }\n", - " log = \"[\" + time + \"] \" + '\", prompt:\"' + prompt + '\", response:\"' + repr(response) + '\"'\n", - " print(log)\n", - " return answer\n", - "\n", - "\n", - "if __name__ == '__main__':\n", - " load_model()\n", - " uvicorn.run(app, host='0.0.0.0', port=8000, workers=1)\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将使用PyTorch镜像运行相应的推理服务,在启动服务之前需要安装模型依赖的相关的依赖。我们可以在`server_src`下准备依赖的`requirements.txt`,对应的`requirements.txt`会在推理服务启动之前被安装到环境中。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile server_src/requirements.txt\n", - "\n", - "# 模型需要的依赖\n", - "transformers==4.30.2\n", - "accelerate\n", - "icetk\n", - "cpm_kernels\n", - "\n", - "torch>=2.0,<2.1\n", - "gradio\n", - "mdtex2html\n", - "sentencepiece\n", - "accelerate\n", - "\n", - "# 推理服务Server的依赖\n", - "fastapi\n", - "uvicorn" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - 
"基于以上的推理服务程序,我们将使用PyTorch镜像和OSS上的模型在PAI创建一个推理服务,代码如下。\n", - "\n", - "> 对于如何使用SDK创建推理服务的详细介绍,请见文档:[创建推理服务](https://help.aliyun.com/document_detail/2261532.html)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import container_serving_spec, Model\n", - "from pai.image import retrieve, ImageScope\n", - "from pai.common.utils import random_str\n", - "\n", - "\n", - "# InferenceSpec用于描述如何创建推理服务\n", - "infer_spec = container_serving_spec(\n", - " # 使用PAI提供的最新PyTorch的推理镜像\n", - " image_uri=retrieve(\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"GPU\",\n", - " image_scope=ImageScope.INFERENCE,\n", - " ),\n", - " source_dir=\"./server_src\",\n", - " command=\"python run.py\",\n", - ")\n", - "\n", - "m = Model(\n", - " # 模型的OSS路径,默认模型会通过挂载的方式挂载到`/eas/workspace/model/`路径下。\n", - " model_data=model_uri,\n", - " inference_spec=infer_spec,\n", - ")\n", - "\n", - "\n", - "# 部署模型,创建推理服务.\n", - "p = m.deploy(\n", - " service_name=\"chatglm_demo_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.gn6i-c8g1.2xlarge\", # 8vCPU 31GB NVIDIA T4×1(GPU Mem 16GB)\n", - " options={\n", - " # 配置EAS RPC框架的超时时间, 单位为毫秒\n", - " \"metadata.rpc.keepalive\": 20000,\n", - " },\n", - ")\n", - "\n", - "print(p.service_name)\n", - "print(p.service_status)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`m.deploy`返回一个Predictor对象,可以用于向创建的推理服务程序发送预测请求。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import RawResponse\n", - "\n", - "resp: RawResponse = p.raw_predict(\n", - " {\n", - " \"prompt\": \"你好\",\n", - " }\n", - ")\n", - "print(resp.json()[\"response\"])\n", - "\n", - "\n", - "resp = p.raw_predict(\n", - " {\n", - " \"prompt\": \"晚上睡不着应该怎么办\",\n", - " \"history\": resp.json()[\"history\"],\n", - " },\n", - " timeout=20,\n", - ")\n", - "print(resp.json())" - 
] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "基于以上的推理服务,我们可以使用Gradio创建一个简单的对话机器人demo。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import gradio as gr\n", - "import random\n", - "import time\n", - "\n", - "with gr.Blocks() as demo:\n", - " chatbot = gr.Chatbot()\n", - " msg = gr.Textbox()\n", - " clear = gr.Button(\"Clear\")\n", - " submit = gr.Button(\"Submit\")\n", - "\n", - " def respond(message, chat_history):\n", - "\n", - " print(f\"Message: {message}\")\n", - " print(f\"ChatHistory: {chat_history}\")\n", - " resp = p.raw_predict(\n", - " {\n", - " \"prompt\": message,\n", - " \"history\": chat_history,\n", - " }\n", - " ).json()\n", - " print(f\"Response: {resp['response']}\")\n", - "\n", - " chat_history.append((message, resp[\"response\"]))\n", - " return \"\", chat_history\n", - "\n", - " submit.click(respond, [msg, chatbot], [msg, chatbot])\n", - "\n", - "demo.launch(share=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过以上创建的Gradio应用,我们可以在页面上与部署的ChatGLM模型进行对话。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "在测试完成之后,我们可以通过以下的代码删除推理服务,释放资源。\n", - "\n", - "> 请注意,删除在线推理服务之后,对应的Gradio的应用将无法使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 微调ChatGLM2-6B\n", - "\n", - "我们可以使用领域数据对ChatGLM进行微调,从而使得模型在特定领域和任务下有更好的表现。ChatGLM团队提供了使用[P-Tuning v2](https://github.com/THUDM/P-tuning-v2)方式对模型进行[微调的方案](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning),我们将基于此方案展示如何将微调训练作业提交到PAI的训练服务执行。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备训练数据集\n", - "\n", - 
"我们将使用了[广告生成数据集](https://aclanthology.org/D19-1321.pdf),对ChatGLM进行微调。我们首先需要准备数据到OSS,供后续微调训练作业使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import download, OssUriObj, upload\n", - "import zipfile\n", - "\n", - "# 下载数据\n", - "data = download(\n", - " # 当前的数据集在上海region,跨region下载,我们需要传递对应OSS Bucket所在Endpoint.\n", - " OssUriObj(\n", - " \"oss://atp-modelzoo-sh.oss-cn-shanghai.aliyuncs.com/release/tutorials/chatGLM/AdvertiseGen_Simple.zip\"\n", - " ),\n", - " local_path=\"./\",\n", - ")\n", - "\n", - "# 解压缩数据\n", - "with zipfile.ZipFile(data, \"r\") as zip_ref:\n", - " zip_ref.extractall(\"./train_data/\")\n", - "\n", - "# 上传数据到OSS\n", - "train_data = \"./train_data/AdvertiseGen_Simple/\"\n", - "train_data_uri = upload(\n", - " \"./train_data/AdvertiseGen_Simple/\", oss_path=\"chatglm_demo/data/advertisegen/\"\n", - ")\n", - "print(train_data_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "相应的数据集数据格式如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!head -n 5 ./train_data/AdvertiseGen_Simple/train.json" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备微调训练作业脚本\n", - "\n", - "ChatGLM的官方提供[微调训练脚本](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning),支持使用P-Tuning v2的方式对ChatGLM模型进行微调。我们将基于相应的微调训练脚本,修改训练作业的拉起Shell脚本(`train.sh`),然后使用PAI Python SDK将微调训练作业提交到PAI执行。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 下载ChatGLM代码\n", - "!git clone https://github.com/THUDM/ChatGLM2-6B.git" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "当训练作业提交到PAI执行时,需要按一定规范读取输入数据,以及将需要保存的模型写出到指定路径下,更加具体介绍请见文档:[提交训练作业](https://help.aliyun.com/document_detail/2261505.html)。\n", - "\n", - 
"修改后的训练作业拉起脚本如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "%%writefile ChatGLM2-6B/ptuning/train.sh\n", - "\n", - "PRE_SEQ_LEN=128\n", - "LR=2e-2\n", - "NUM_GPUS=`nvidia-smi --list-gpus | wc -l`\n", - "\n", - "torchrun --standalone --nnodes=1 --nproc-per-node=$NUM_GPUS main.py \\\n", - " --do_train \\\n", - " --train_file /ml/input/data/train/train.json \\\n", - " --validation_file /ml/input/data/train/dev.json \\\n", - " --preprocessing_num_workers 10 \\\n", - " --prompt_column content \\\n", - " --response_column summary \\\n", - " --overwrite_cache \\\n", - " --model_name_or_path /ml/input/data/model \\\n", - " --output_dir /ml/output/model/ \\\n", - " --overwrite_output_dir \\\n", - " --max_source_length 64 \\\n", - " --max_target_length 128 \\\n", - " --per_device_train_batch_size 4 \\\n", - " --per_device_eval_batch_size 4 \\\n", - " --gradient_accumulation_steps 32 \\\n", - " --predict_with_generate \\\n", - " --num_train_epochs 10 \\\n", - " --save_strategy epoch \\\n", - " --learning_rate $LR \\\n", - " --pre_seq_len $PRE_SEQ_LEN \\\n", - " --quantization_bit 4\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "这里我们将使用PAI提供的PyTorch GPU训练镜像运行训练作业,需要安装部分第三方依赖包。用户可以通过提供`requirements.txt`的方式提供,相应的依赖会在训练作业执行前被安装到环境中\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "%%writefile ChatGLM2-6B/ptuning/requirements.txt\n", - "# 模型需要的依赖\n", - "transformers==4.30.2\n", - "accelerate\n", - "icetk\n", - "cpm_kernels\n", - "\n", - "torch>=2.0,<2.1\n", - "sentencepiece\n", - "accelerate\n", - "\n", - "rouge_chinese\n", - "nltk\n", - "jieba\n", - "datasets" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 提交训练作业\n", - "\n", - "我们将通过PAI 
Python SDK,将以上的训练作业提交到PAI执行。SDK在提交训练作业之后,会打印训练作业的链接,用户可以通过对应的链接查看作业的执行详情,输出日志。\n", - "\n", - "> Note:按当前示例教程使用的训练配置、数据集和机器规格,训练作业运行约10分钟左右。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "# 使用PAI提供的最新的PyTorch推理镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "\n", - "est = Estimator(\n", - " command=\"bash train.sh\", # 启动命令\n", - " source_dir=\"./ChatGLM2-6B/ptuning\", # 训练代码目录.\n", - " image_uri=image_uri, # 训练镜像\n", - " instance_type=\"ecs.gn6e-c12g1.3xlarge\", # 使用的机器规格示例,V100(32G)\n", - " base_job_name=\"chatglm2_finetune_\",\n", - ")\n", - "\n", - "\n", - "# 提交训练作业\n", - "est.fit(\n", - " inputs={\n", - " \"model\": model_uri,\n", - " \"train\": train_data_uri,\n", - " }\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "默认`estimator.fit`会等待到作业执行完成。作业执行成功之后,用户可以通过`est.model_data()`获取输出模型在OSS上的路径地址。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "用户可以通过`ossutil`或是SDK提供的便利方法将模型下载到本地:\n", - "\n", - "```python\n", - "from pai.common.oss_util import download\n", - "\n", - "\n", - "# 使用SDK的便利方法下载模型到本地.\n", - "download(\n", - "\toss_path=est.model_data(),\n", - "\tlocal_path=\"./output_model\",\n", - ")\n", - "\n", - "```" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 部署微调之后的模型\n", - "\n", - "微调训练之后获得的`checkpoints`,需要和原始的模型配合一起使用。我们需要通过以下代码获得对应的checkpoint路径.\n", - "\n", - "> 用户通过修改微调训练的代码,使用`Trainer.save_model()`显式的保存相应的checkpoints,则可以直接通过`estimator.model_data()`下获得相应的checkpoints." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "\n", - "# 以上的训练作业超参设置中,我们设置`epochs=2`, checkpoints保存的策略是`每一个epochs保存`。\n", - "# 默认最后一个checkpoint会被保存到`{output_dir}/checkpoint-2`路径下.\n", - "# 通过以下路径,我们可以获得模型训练获得的最后一个checkpoint的OSS路径.\n", - "\n", - "checkpoint_uri = os.path.join(est.model_data(), \"checkpoint-10/\")\n", - "print(checkpoint_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将复用ChatGLM2部署的推理服务程序创建推理服务。与直接部署ChatGLM2的不同点在于我们还需要提供微调之后获得的checkpoints。\n", - "\n", - "通过`InferenceSpec.mount` API,我们可以将相应的OSS模型路径挂载到服务容器中,供推理服务程序使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from pai.model import container_serving_spec, Model\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "\n", - "# InferenceSpec用于描述如何创建推理服务\n", - "infer_spec = container_serving_spec(\n", - " image_uri=retrieve( # 使用PAI提供的最新PyTorch的推理镜像\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"GPU\",\n", - " image_scope=ImageScope.INFERENCE,\n", - " ),\n", - " source_dir=\"./server_src\", # 代码目录\n", - " command=\"python run.py\", # 启动命令\n", - ")\n", - "\n", - "\n", - "# 将相应的checkpoints挂载到服务中,推理服务的程序通过检查目录(/ml/ptuning_checkpoints/)是否存在加载checkpoints\n", - "infer_spec.mount(checkpoint_uri, \"/ml/ptuning_checkpoints\")\n", - "print(infer_spec.to_dict())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.utils import random_str\n", - "\n", - "m = Model(\n", - " model_data=model_uri,\n", - " inference_spec=infer_spec,\n", - ")\n", - "\n", - "# 部署模型\n", - "p = m.deploy(\n", - " service_name=\"chatglm_ft_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.gn6i-c16g1.4xlarge\", # 1 * T4\n", - " options={\n", - " # 配置EAS RPC框架的超时时间, 单位为毫秒\n", - " \"metadata.rpc.keepalive\": 
20000,\n", - " },\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "向推理服务发送请求,测试推理服务是否正常启动。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "resp = p.raw_predict(\n", - " {\n", - " \"prompt\": \"你好\",\n", - " },\n", - ")\n", - "print(resp.json())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "基于以上微调后模型的推理服务,我们可以使用Gradio创建一个新的机器人。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import gradio as gr\n", - "import random\n", - "import time\n", - "\n", - "with gr.Blocks() as demo:\n", - " chatbot = gr.Chatbot()\n", - " msg = gr.Textbox()\n", - " clear = gr.Button(\"Clear\")\n", - " submit = gr.Button(\"Submit\")\n", - "\n", - " def respond(message, chat_history):\n", - "\n", - " print(f\"Message: {message}\")\n", - " print(f\"ChatHistory: {chat_history}\")\n", - " resp = p.raw_predict(\n", - " {\n", - " \"prompt\": message,\n", - " \"history\": chat_history,\n", - " }\n", - " ).json()\n", - " print(f\"Response: {resp['response']}\")\n", - "\n", - " chat_history.append((message, resp[\"response\"]))\n", - " return \"\", chat_history\n", - "\n", - " submit.click(respond, [msg, chatbot], [msg, chatbot])\n", - "\n", - "demo.launch(share=True)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在测试完成之后,可以通过`p.delete_service()`删除服务,释放资源。\n", - "\n", - "> 请注意,删除在线推理服务之后,对应的Gradio的应用将无法使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": 
"text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/source/tutorial/chatglm_finetune/resource/gradio-chatglml.jpg b/docs/source/tutorial/chatglm_finetune/resource/gradio-chatglml.jpg deleted file mode 100644 index 13848a5..0000000 Binary files a/docs/source/tutorial/chatglm_finetune/resource/gradio-chatglml.jpg and /dev/null differ diff --git a/docs/source/tutorial/checkpoint/checkpoint.ipynb b/docs/source/tutorial/checkpoint/checkpoint.ipynb deleted file mode 100644 index 55cfbff..0000000 --- a/docs/source/tutorial/checkpoint/checkpoint.ipynb +++ /dev/null @@ -1,1285 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 在训练作业中使用checkpoint\n", - "\n", - "在机器学习模型训练过程中,往往需要较长的时间完成训练数据的迭代,实现模型的收敛,然而训练过程可能会因为各种原因中断,例如机器故障、网络问题、或是代码原因等。为了避免中断后需要重头开始训练,开发者通常会在训练过程中,定期将模型的状态保存为`checkpoint`文件,以便在训练中断后,能够从保存的`checkpoint`文件获取模型参数,优化器状态,训练步数等训练状态,恢复训练。\n", - "\n", - "本文档介绍如何在PAI的训练作业中使用checkpoint。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 准备工作\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK 需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在 PAI Python SDK 安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 使用checkpoint保存和恢复训练作业\n", - "\n", - "当使用SDK提供的`pai.estimator.Estimator` 提交训练作业时,训练作业默认会挂载用户的OSS 
Bucket路径到训练作业的`/ml/output/checkpoints`目录。训练代码可以将checkpoint文件写出到对应的路径,从而保存到OSS中。提交训练作业之后,可以通过 `estimator.checkpoints_data()` 方法可以获取`checkpoints`保存的OSS路径。\n", - "\n", - "当需要使用已有的`checkpoint`时,用户可以通过 `checkpoints_path` 参数指定一个OSS Bucket路径,PAI会将该路径挂载到训练作业的`/ml/output/checkpoints`目录,训练作业可以通过读取对应数据路径下的checkpoint文件来恢复训练。\n", - "\n", - "\n", - "\n", - "```python\n", - "\n", - "from pai.estimator import Estimator\n", - "\n", - "\n", - "# 1. 使用默认的checkpoints路径保存模型的checkpoints\n", - "est = Estimator(\n", - "\timage_uri=\"\",\n", - "\tcommand=\"python train.py\",\n", - ")\n", - "\n", - "# 训练作业默认会挂载一个OSS Bucket路径到 /ml/output/checkpoints\n", - "# 用户训练代码可以通过写文件到 /ml/output/checkpoints 保存checkpoint\n", - "est.fit()\n", - "\n", - "# 查看训练作业的checkpoints路径\n", - "print(est.checkpoints_data())\n", - "\n", - "# 2. 使用其他训练作业产出的checkpoints恢复训练\n", - "est_load = Estimator(\n", - "\timage_uri=\"\",\n", - "\tcommand=\"python train.py\",\n", - "\t# 指定使用上一个训练作业输出的checkpoints.\n", - "\tcheckpoints_path=est.checkpoints_data(),\n", - ")\n", - "\n", - "# 训练代码从 /ml/output/checkpoints 中加载checkpoint\n", - "est_load.fit()\n", - "\n", - "```\n", - "\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 在PyTorch中使用checkpoint\n", - "\n", - "在PyTorch中,通常使用`torch.save`方法将模型的参数、优化器的状态、训练进度等信息,以字典的形式作为`checkpoint`进行保存。保存的`checkpoint`文件可以通过 `torch.load` 进行加载。PyTorch提供了如何在训练中保存和加载checkpoint的教程:[Save And Loading A General Checkpoint In PyTorch](https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_a_general_checkpoint.html)。\n", - "\n", - "我们将基于PyTorch的示例教程,演示如何在PAI的训练作业中使用checkpoint。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练作业使用的代码如下:\n", - "\n", - "1. 在训练开始之前,通过 `/ml/output/checkpoints/` 路径加载checkpoint获取初始化模型参数,优化器,以及训练进度。\n", - "\n", - "2. 
基于checkpoint的状态信息训练模型,在训练过程中,定期保存checkpoint到 `/ml/output/checkpoints/` 路径。\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p train_src" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_src/train.py\n", - "# Additional information\n", - "import os\n", - "import torch\n", - "import torch.nn as nn\n", - "import torch.optim as optim\n", - "from torch.utils.data import Dataset, DataLoader\n", - "import torch.nn.functional as F\n", - "\n", - "\n", - "EPOCH = 5\n", - "CHECKPOINT_NAME = \"checkpoint.pt\"\n", - "LOSS = 0.4\n", - "\n", - "# Define a custom mock dataset\n", - "class RandomDataset(Dataset):\n", - " def __init__(self, num_samples=1000):\n", - " self.num_samples = num_samples\n", - "\n", - " def __len__(self):\n", - " return self.num_samples\n", - "\n", - " def __getitem__(self, idx):\n", - " x = torch.randn(10) # Generating random input tensor\n", - " y = torch.randint(0, 2, (1,)).item() # Generating random target label (0 or 1)\n", - " return x, y\n", - "\n", - "\n", - "# Define your model\n", - "class MyModel(nn.Module):\n", - " def __init__(self):\n", - " super(MyModel, self).__init__()\n", - " self.fc = nn.Linear(10, 2)\n", - " \n", - " def forward(self, x):\n", - " return self.fc(x)\n", - "\n", - "\n", - "net = MyModel()\n", - "criterion = nn.CrossEntropyLoss()\n", - "optimizer = optim.SGD(net.parameters(), lr=0.001)\n", - "start_epoch = 0\n", - "\n", - "def load_checkpoint():\n", - " \"\"\"Load checkpoint if exists.\"\"\"\n", - " global net, optimizer, start_epoch, LOSS\n", - " checkpoint_dir = os.environ.get(\"PAI_OUTPUT_CHECKPOINTS\")\n", - " if not checkpoint_dir:\n", - " return\n", - " checkpoint_path = os.path.join(checkpoint_dir, CHECKPOINT_NAME)\n", - " if not os.path.exists(checkpoint_path):\n", - " return\n", - " data = torch.load(checkpoint_path)\n", - "\n", - " 
net.load_state_dict(data[\"model_state_dict\"])\n", - " optimizer.load_state_dict(data[\"optimizer_state_dict\"])\n", - " start_epoch = data[\"epoch\"]\n", - "\n", - "\n", - "def save_checkpoint(epoch):\n", - " global net, optimizer, start_epoch, LOSS\n", - " checkpoint_dir = os.environ.get(\"PAI_OUTPUT_CHECKPOINTS\")\n", - " if not checkpoint_dir:\n", - " return\n", - " checkpoint_path = os.path.join(checkpoint_dir, CHECKPOINT_NAME)\n", - " torch.save({\n", - " 'epoch': epoch + 1,\n", - " 'model_state_dict': net.state_dict(),\n", - " 'optimizer_state_dict': optimizer.state_dict(),\n", - " }, checkpoint_path)\n", - "\n", - "\n", - "def parse_args():\n", - " import argparse\n", - " parser = argparse.ArgumentParser()\n", - " parser.add_argument(\"--epochs\", type=int, default=10)\n", - " args = parser.parse_args()\n", - " return args\n", - "\n", - "\n", - "def train():\n", - " args = parse_args()\n", - " load_checkpoint()\n", - " batch_size = 4\n", - " dataloader = DataLoader(RandomDataset(), batch_size=batch_size, shuffle=True)\n", - " num_epochs = args.epochs\n", - " print(num_epochs)\n", - " for epoch in range(start_epoch, num_epochs):\n", - " net.train()\n", - " for i, (inputs, targets) in enumerate(dataloader):\n", - " # Forward pass\n", - " outputs = net(inputs)\n", - " loss = criterion(outputs, targets)\n", - " \n", - " # Backward pass and optimization\n", - " optimizer.zero_grad()\n", - " loss.backward()\n", - " optimizer.step()\n", - " \n", - " # Print training progress\n", - " if (i+1) % 10 == 0:\n", - " print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{len(dataloader)}], Loss: {loss.item()}')\n", - " \n", - " # Save checkpoint\n", - " save_checkpoint(epoch=epoch)\n", - " # save the model\n", - " torch.save(net.state_dict(), os.path.join(os.environ.get(\"PAI_OUTPUT_MODEL\", \".\"), \"model.pt\"))\n", - " \n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " train()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - 
"我们将以上的代码提交到PAI执行,训练作业最终提供挂载的OSS路径保存模型。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/liangquan/code/pypai/pai/common/oss_utils.py:13: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n", - " from tqdm.autonotebook import tqdm\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "d8266991a0d042c6a54531f252ecc727", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Uploading file: /var/folders/hc/5w4bg25j1ns2mm0yb06zzzbh0000gp/T/tmpt3_0rsuf/source.tar.gz: 0%| | 0…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "View the job detail by accessing the console URI: https://pai.console.aliyun.com/?regionId=cn-hangzhou&workspaceId=58670#/training/jobs/train1u1it512gqg\n", - "TrainingJob launch starting\n", - "MAX_PARALLELISM=0\n", - "C_INCLUDE_PATH=/home/pai/include\n", - "KUBERNETES_PORT=tcp://10.192.0.1:443\n", - "KUBERNETES_SERVICE_PORT=443\n", - "LANGUAGE=en_US.UTF-8\n", - "PIP_TRUSTED_HOST=mirrors.cloud.aliyuncs.com\n", - "MASTER_ADDR=train1u1it512gqg-master-0\n", - "HOSTNAME=train1u1it512gqg-master-0\n", - "LD_LIBRARY_PATH=:/lib/x86_64-linux-gnu:/home/pai/lib:/home/pai/jre/lib/amd64/server\n", - "MASTER_PORT=23456\n", - "HOME=/root\n", - "PAI_USER_ARGS=\n", - "PYTHONUNBUFFERED=0\n", - "PAI_OUTPUT_CHECKPOINTS=/ml/output/checkpoints/\n", - "PAI_CONFIG_DIR=/ml/input/config/\n", - "WORLD_SIZE=1\n", - "REGION_ID=cn-hangzhou\n", - "CPLUS_INCLUDE_PATH=/home/pai/include\n", - "RANK=0\n", - "OPAL_PREFIX=/home/pai/\n", - "PAI_TRAINING_JOB_ID=train1u1it512gqg\n", - "TERM=xterm-color\n", - "KUBERNETES_PORT_443_TCP_ADDR=10.192.0.1\n", - "PAI_OUTPUT_MODEL=/ml/output/model/\n", - 
"ELASTIC_TRAINING_ENABLED=false\n", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/pai/bin:/home/pai/hadoop/bin\n", - "PIP_INDEX_URL=https://mirrors.cloud.aliyuncs.com/pypi/simple\n", - "KUBERNETES_PORT_443_TCP_PORT=443\n", - "KUBERNETES_PORT_443_TCP_PROTO=tcp\n", - "LANG=en_US.UTF-8\n", - "aliyun_logs_containerType_tags=containerType=Algorithm\n", - "PAI_TRAINING_USE_ECI=true\n", - "KUBERNETES_SERVICE_PORT_HTTPS=443\n", - "KUBERNETES_PORT_443_TCP=tcp://10.192.0.1:443\n", - "ELASTIC_INFERENCE_ENABLED=false\n", - "LC_ALL=en_US.UTF-8\n", - "JAVA_HOME=/home/pai\n", - "KUBERNETES_SERVICE_HOST=10.192.0.1\n", - "PWD=/\n", - "PAI_HPS={}\n", - "TZ=UTC\n", - "HADOOP_HOME=/home/pai/hadoop\n", - "PAI_OUTPUT_LOGS=/ml/output/logs/\n", - "aliyun_logs_trainingJobId_tags=trainingJobId=train1u1it512gqg\n", - "PAI_ODPS_CREDENTIAL=/ml/input/credential/odps.json\n", - "PAI_WORKING_DIR=/ml/usercode/\n", - "Change to Working Directory, /ml/usercode/\n", - "User program launching\n", - "-----------------------------------------------------------------\n", - "10\n", - "Epoch [1/10], Step [10/250], Loss: 0.3664854168891907\n", - "Epoch [1/10], Step [20/250], Loss: 0.5867650508880615\n", - "Epoch [1/10], Step [30/250], Loss: 0.8810225129127502\n", - "Epoch [1/10], Step [40/250], Loss: 1.3596220016479492\n", - "Epoch [1/10], Step [50/250], Loss: 1.0757191181182861\n", - "Epoch [1/10], Step [60/250], Loss: 0.5261836051940918\n", - "Epoch [1/10], Step [70/250], Loss: 1.0891999006271362\n", - "Epoch [1/10], Step [80/250], Loss: 1.2425217628479004\n", - "Epoch [1/10], Step [90/250], Loss: 0.7928518652915955\n", - "Epoch [1/10], Step [100/250], Loss: 0.500701367855072\n", - "Epoch [1/10], Step [110/250], Loss: 1.1105762720108032\n", - "Epoch [1/10], Step [120/250], Loss: 0.7642831802368164\n", - "Epoch [1/10], Step [130/250], Loss: 0.9435116052627563\n", - "Epoch [1/10], Step [140/250], Loss: 0.4632255434989929\n", - "Epoch [1/10], Step [150/250], Loss: 
0.8282555937767029\n", - "Epoch [1/10], Step [160/250], Loss: 0.5644117593765259\n", - "Epoch [1/10], Step [170/250], Loss: 0.8821360468864441\n", - "Epoch [1/10], Step [180/250], Loss: 0.6495410799980164\n", - "Epoch [1/10], Step [190/250], Loss: 0.6814499497413635\n", - "Epoch [1/10], Step [200/250], Loss: 1.1818656921386719\n", - "Epoch [1/10], Step [210/250], Loss: 0.4218548536300659\n", - "Epoch [1/10], Step [220/250], Loss: 0.5892952680587769\n", - "Epoch [1/10], Step [230/250], Loss: 0.8104468584060669\n", - "Epoch [1/10], Step [240/250], Loss: 0.3310832977294922\n", - "Epoch [1/10], Step [250/250], Loss: 1.0296210050582886\n", - "Epoch [2/10], Step [10/250], Loss: 0.747037947177887\n", - "Epoch [2/10], Step [20/250], Loss: 1.0555682182312012\n", - "Epoch [2/10], Step [30/250], Loss: 0.5005624294281006\n", - "Epoch [2/10], Step [40/250], Loss: 0.6007864475250244\n", - "Epoch [2/10], Step [50/250], Loss: 0.8172819018363953\n", - "Epoch [2/10], Step [60/250], Loss: 0.7322960495948792\n", - "Epoch [2/10], Step [70/250], Loss: 0.6178841590881348\n", - "Epoch [2/10], Step [80/250], Loss: 0.9776118993759155\n", - "Epoch [2/10], Step [90/250], Loss: 0.8088865876197815\n", - "Epoch [2/10], Step [100/250], Loss: 0.7169486284255981\n", - "Epoch [2/10], Step [110/250], Loss: 0.8003190159797668\n", - "Epoch [2/10], Step [120/250], Loss: 0.9178279638290405\n", - "Epoch [2/10], Step [130/250], Loss: 0.5217956900596619\n", - "Epoch [2/10], Step [140/250], Loss: 1.2751939296722412\n", - "Epoch [2/10], Step [150/250], Loss: 1.1024904251098633\n", - "Epoch [2/10], Step [160/250], Loss: 0.6336060762405396\n", - "Epoch [2/10], Step [170/250], Loss: 0.799022376537323\n", - "Epoch [2/10], Step [180/250], Loss: 0.7938567996025085\n", - "Epoch [2/10], Step [190/250], Loss: 1.060591220855713\n", - "Epoch [2/10], Step [200/250], Loss: 0.9365970492362976\n", - "Epoch [2/10], Step [210/250], Loss: 0.6945515871047974\n", - "Epoch [2/10], Step [220/250], Loss: 0.4772261381149292\n", - 
"Epoch [2/10], Step [230/250], Loss: 1.0332412719726562\n", - "Epoch [2/10], Step [240/250], Loss: 0.7284632325172424\n", - "Epoch [2/10], Step [250/250], Loss: 0.4485410451889038\n", - "Epoch [3/10], Step [10/250], Loss: 0.7845520377159119\n", - "Epoch [3/10], Step [20/250], Loss: 0.5619648694992065\n", - "Epoch [3/10], Step [30/250], Loss: 0.725273609161377\n", - "Epoch [3/10], Step [40/250], Loss: 0.7783026695251465\n", - "Epoch [3/10], Step [50/250], Loss: 0.5168777704238892\n", - "Epoch [3/10], Step [60/250], Loss: 0.67060387134552\n", - "Epoch [3/10], Step [70/250], Loss: 0.9300781488418579\n", - "Epoch [3/10], Step [80/250], Loss: 0.6534505486488342\n", - "Epoch [3/10], Step [90/250], Loss: 0.557340681552887\n", - "Epoch [3/10], Step [100/250], Loss: 0.667724609375\n", - "Epoch [3/10], Step [110/250], Loss: 0.5125826001167297\n", - "Epoch [3/10], Step [120/250], Loss: 0.4494149088859558\n", - "Epoch [3/10], Step [130/250], Loss: 0.6902559995651245\n", - "Epoch [3/10], Step [140/250], Loss: 0.5450549125671387\n", - "Epoch [3/10], Step [150/250], Loss: 1.0632681846618652\n", - "Epoch [3/10], Step [160/250], Loss: 0.7964761257171631\n", - "Epoch [3/10], Step [170/250], Loss: 0.5218536257743835\n", - "Epoch [3/10], Step [180/250], Loss: 0.6972622275352478\n", - "Epoch [3/10], Step [190/250], Loss: 0.7963941097259521\n", - "Epoch [3/10], Step [200/250], Loss: 0.5798731446266174\n", - "Epoch [3/10], Step [210/250], Loss: 0.7930802702903748\n", - "Epoch [3/10], Step [220/250], Loss: 0.7618649005889893\n", - "Epoch [3/10], Step [230/250], Loss: 0.9831617474555969\n", - "Epoch [3/10], Step [240/250], Loss: 0.7935497164726257\n", - "Epoch [3/10], Step [250/250], Loss: 0.9747794270515442\n", - "Epoch [4/10], Step [10/250], Loss: 0.6432996392250061\n", - "Epoch [4/10], Step [20/250], Loss: 0.6515889167785645\n", - "Epoch [4/10], Step [30/250], Loss: 0.8191264867782593\n", - "Epoch [4/10], Step [40/250], Loss: 0.5717310905456543\n", - "Epoch [4/10], Step [50/250], Loss: 
1.0365064144134521\n", - "Epoch [4/10], Step [60/250], Loss: 0.7181562185287476\n", - "Epoch [4/10], Step [70/250], Loss: 0.6014276146888733\n", - "Epoch [4/10], Step [80/250], Loss: 0.8743482232093811\n", - "Epoch [4/10], Step [90/250], Loss: 0.5963127613067627\n", - "Epoch [4/10], Step [100/250], Loss: 0.7012943029403687\n", - "Epoch [4/10], Step [110/250], Loss: 0.6271654367446899\n", - "Epoch [4/10], Step [120/250], Loss: 0.646144449710846\n", - "Epoch [4/10], Step [130/250], Loss: 0.5112266540527344\n", - "Epoch [4/10], Step [140/250], Loss: 0.8657329678535461\n", - "Epoch [4/10], Step [150/250], Loss: 0.677897572517395\n", - "Epoch [4/10], Step [160/250], Loss: 0.798669695854187\n", - "Epoch [4/10], Step [170/250], Loss: 0.805213451385498\n", - "Epoch [4/10], Step [180/250], Loss: 0.7744658589363098\n", - "Epoch [4/10], Step [190/250], Loss: 0.4748728275299072\n", - "Epoch [4/10], Step [200/250], Loss: 0.6623726487159729\n", - "Epoch [4/10], Step [210/250], Loss: 0.6851851940155029\n", - "Epoch [4/10], Step [220/250], Loss: 0.5917701721191406\n", - "Epoch [4/10], Step [230/250], Loss: 0.586968719959259\n", - "Epoch [4/10], Step [240/250], Loss: 0.758073091506958\n", - "Epoch [4/10], Step [250/250], Loss: 0.7908360958099365\n", - "Epoch [5/10], Step [10/250], Loss: 0.747495174407959\n", - "Epoch [5/10], Step [20/250], Loss: 0.7880417108535767\n", - "Epoch [5/10], Step [30/250], Loss: 1.4239259958267212\n", - "Epoch [5/10], Step [40/250], Loss: 0.709957480430603\n", - "Epoch [5/10], Step [50/250], Loss: 0.45279955863952637\n", - "Epoch [5/10], Step [60/250], Loss: 0.6855078935623169\n", - "Epoch [5/10], Step [70/250], Loss: 0.7050631046295166\n", - "Epoch [5/10], Step [80/250], Loss: 0.8256967067718506\n", - "Epoch [5/10], Step [90/250], Loss: 0.9627029895782471\n", - "Epoch [5/10], Step [100/250], Loss: 0.7069070339202881\n", - "Epoch [5/10], Step [110/250], Loss: 0.6772119998931885\n", - "Epoch [5/10], Step [120/250], Loss: 0.5547316670417786\n", - "Epoch 
[5/10], Step [130/250], Loss: 0.4749568998813629\n", - "Epoch [5/10], Step [140/250], Loss: 0.5910231471061707\n", - "Epoch [5/10], Step [150/250], Loss: 0.5789163112640381\n", - "Epoch [5/10], Step [160/250], Loss: 0.994613766670227\n", - "Epoch [5/10], Step [170/250], Loss: 0.7664419412612915\n", - "Epoch [5/10], Step [180/250], Loss: 0.7812412977218628\n", - "Epoch [5/10], Step [190/250], Loss: 0.932634174823761\n", - "Epoch [5/10], Step [200/250], Loss: 0.4732060134410858\n", - "Epoch [5/10], Step [210/250], Loss: 0.6712639927864075\n", - "Epoch [5/10], Step [220/250], Loss: 0.7019771337509155\n", - "Epoch [5/10], Step [230/250], Loss: 0.668921709060669\n", - "Epoch [5/10], Step [240/250], Loss: 0.5486156344413757\n", - "Epoch [5/10], Step [250/250], Loss: 0.8131189346313477\n", - "Epoch [6/10], Step [10/250], Loss: 0.5800281167030334\n", - "Epoch [6/10], Step [20/250], Loss: 0.9032570719718933\n", - "Epoch [6/10], Step [30/250], Loss: 0.6829659938812256\n", - "Epoch [6/10], Step [40/250], Loss: 0.577970027923584\n", - "Epoch [6/10], Step [50/250], Loss: 0.9745671153068542\n", - "Epoch [6/10], Step [60/250], Loss: 0.6292040348052979\n", - "Epoch [6/10], Step [70/250], Loss: 0.9189562201499939\n", - "Epoch [6/10], Step [80/250], Loss: 1.0687212944030762\n", - "Epoch [6/10], Step [90/250], Loss: 0.6210573315620422\n", - "Epoch [6/10], Step [100/250], Loss: 0.7758654356002808\n", - "Epoch [6/10], Step [110/250], Loss: 1.055539846420288\n", - "Epoch [6/10], Step [120/250], Loss: 0.7991855144500732\n", - "Epoch [6/10], Step [130/250], Loss: 0.8390480279922485\n", - "Epoch [6/10], Step [140/250], Loss: 0.5641282200813293\n", - "Epoch [6/10], Step [150/250], Loss: 0.5416208505630493\n", - "Epoch [6/10], Step [160/250], Loss: 0.8556939363479614\n", - "Epoch [6/10], Step [170/250], Loss: 0.8848042488098145\n", - "Epoch [6/10], Step [180/250], Loss: 0.6585526466369629\n", - "Epoch [6/10], Step [190/250], Loss: 0.5264347791671753\n", - "Epoch [6/10], Step [200/250], Loss: 
0.7451325058937073\n", - "Epoch [6/10], Step [210/250], Loss: 0.8498039841651917\n", - "Epoch [6/10], Step [220/250], Loss: 0.9514821767807007\n", - "Epoch [6/10], Step [230/250], Loss: 0.5831080675125122\n", - "Epoch [6/10], Step [240/250], Loss: 0.7323013544082642\n", - "Epoch [6/10], Step [250/250], Loss: 0.799047589302063\n", - "Epoch [7/10], Step [10/250], Loss: 0.7431624531745911\n", - "Epoch [7/10], Step [20/250], Loss: 0.7462856769561768\n", - "Epoch [7/10], Step [30/250], Loss: 0.7637103796005249\n", - "Epoch [7/10], Step [40/250], Loss: 0.7512863874435425\n", - "Epoch [7/10], Step [50/250], Loss: 0.8934370279312134\n", - "Epoch [7/10], Step [60/250], Loss: 0.6657339334487915\n", - "Epoch [7/10], Step [70/250], Loss: 0.7996265292167664\n", - "Epoch [7/10], Step [80/250], Loss: 0.7883811593055725\n", - "Epoch [7/10], Step [90/250], Loss: 0.7327611446380615\n", - "Epoch [7/10], Step [100/250], Loss: 0.7103905081748962\n", - "Epoch [7/10], Step [110/250], Loss: 0.8145009875297546\n", - "Epoch [7/10], Step [120/250], Loss: 0.6999544501304626\n", - "Epoch [7/10], Step [130/250], Loss: 0.6132965087890625\n", - "Epoch [7/10], Step [140/250], Loss: 0.8219666481018066\n", - "Epoch [7/10], Step [150/250], Loss: 0.573877215385437\n", - "Epoch [7/10], Step [160/250], Loss: 0.864593505859375\n", - "Epoch [7/10], Step [170/250], Loss: 0.7187140583992004\n", - "Epoch [7/10], Step [180/250], Loss: 0.601334810256958\n", - "Epoch [7/10], Step [190/250], Loss: 0.6193158626556396\n", - "Epoch [7/10], Step [200/250], Loss: 0.7600311040878296\n", - "Epoch [7/10], Step [210/250], Loss: 0.6659085154533386\n", - "Epoch [7/10], Step [220/250], Loss: 0.6364413499832153\n", - "Epoch [7/10], Step [230/250], Loss: 0.878304123878479\n", - "Epoch [7/10], Step [240/250], Loss: 0.7139410972595215\n", - "Epoch [7/10], Step [250/250], Loss: 0.6852972507476807\n", - "Epoch [8/10], Step [10/250], Loss: 1.0263853073120117\n", - "Epoch [8/10], Step [20/250], Loss: 0.7559791803359985\n", - "Epoch 
[8/10], Step [30/250], Loss: 0.6709325313568115\n", - "Epoch [8/10], Step [40/250], Loss: 0.5146634578704834\n", - "Epoch [8/10], Step [50/250], Loss: 0.6418485641479492\n", - "Epoch [8/10], Step [60/250], Loss: 0.72318035364151\n", - "Epoch [8/10], Step [70/250], Loss: 0.7116968631744385\n", - "Epoch [8/10], Step [80/250], Loss: 0.7035868763923645\n", - "Epoch [8/10], Step [90/250], Loss: 0.6085933446884155\n", - "Epoch [8/10], Step [100/250], Loss: 0.5128545761108398\n", - "Epoch [8/10], Step [110/250], Loss: 0.6380510330200195\n", - "Epoch [8/10], Step [120/250], Loss: 0.4963105320930481\n", - "Epoch [8/10], Step [130/250], Loss: 0.6693160533905029\n", - "Epoch [8/10], Step [140/250], Loss: 0.6602588891983032\n", - "Epoch [8/10], Step [150/250], Loss: 0.8440876007080078\n", - "Epoch [8/10], Step [160/250], Loss: 0.7596740126609802\n", - "Epoch [8/10], Step [170/250], Loss: 0.695992112159729\n", - "Epoch [8/10], Step [180/250], Loss: 0.6737014651298523\n", - "Epoch [8/10], Step [190/250], Loss: 0.6722623705863953\n", - "Epoch [8/10], Step [200/250], Loss: 0.5857406854629517\n", - "Epoch [8/10], Step [210/250], Loss: 0.9563039541244507\n", - "Epoch [8/10], Step [220/250], Loss: 0.7375826835632324\n", - "Epoch [8/10], Step [230/250], Loss: 0.8751094341278076\n", - "Epoch [8/10], Step [240/250], Loss: 0.7180076837539673\n", - "Epoch [8/10], Step [250/250], Loss: 0.6384711861610413\n", - "Epoch [9/10], Step [10/250], Loss: 0.6789698004722595\n", - "Epoch [9/10], Step [20/250], Loss: 0.6645065546035767\n", - "Epoch [9/10], Step [30/250], Loss: 0.6996726989746094\n", - "Epoch [9/10], Step [40/250], Loss: 0.7402397394180298\n", - "Epoch [9/10], Step [50/250], Loss: 0.6388964653015137\n", - "Epoch [9/10], Step [60/250], Loss: 0.9401450753211975\n", - "Epoch [9/10], Step [70/250], Loss: 0.6708970665931702\n", - "Epoch [9/10], Step [80/250], Loss: 0.728550136089325\n", - "Epoch [9/10], Step [90/250], Loss: 0.7362596988677979\n", - "Epoch [9/10], Step [100/250], Loss: 
0.7750495672225952\n", - "Epoch [9/10], Step [110/250], Loss: 0.807244062423706\n", - "Epoch [9/10], Step [120/250], Loss: 0.754521369934082\n", - "Epoch [9/10], Step [130/250], Loss: 0.5469345450401306\n", - "Epoch [9/10], Step [140/250], Loss: 0.8965460062026978\n", - "Epoch [9/10], Step [150/250], Loss: 0.7952369451522827\n", - "Epoch [9/10], Step [160/250], Loss: 0.6263578534126282\n", - "Epoch [9/10], Step [170/250], Loss: 0.5788871049880981\n", - "Epoch [9/10], Step [180/250], Loss: 0.7363749146461487\n", - "Epoch [9/10], Step [190/250], Loss: 0.7322844862937927\n", - "Epoch [9/10], Step [200/250], Loss: 0.6707043051719666\n", - "Epoch [9/10], Step [210/250], Loss: 0.7251213192939758\n", - "Epoch [9/10], Step [220/250], Loss: 0.6435517072677612\n", - "Epoch [9/10], Step [230/250], Loss: 0.534774124622345\n", - "Epoch [9/10], Step [240/250], Loss: 0.6989405751228333\n", - "Epoch [9/10], Step [250/250], Loss: 0.7413943409919739\n", - "Epoch [10/10], Step [10/250], Loss: 0.6014090776443481\n", - "Epoch [10/10], Step [20/250], Loss: 0.8173813819885254\n", - "Epoch [10/10], Step [30/250], Loss: 0.8984671235084534\n", - "Epoch [10/10], Step [40/250], Loss: 0.6354056000709534\n", - "Epoch [10/10], Step [50/250], Loss: 0.7964866757392883\n", - "Epoch [10/10], Step [60/250], Loss: 0.7849454879760742\n", - "Epoch [10/10], Step [70/250], Loss: 0.5637381076812744\n", - "Epoch [10/10], Step [80/250], Loss: 0.7669687271118164\n", - "Epoch [10/10], Step [90/250], Loss: 0.6140038371086121\n", - "Epoch [10/10], Step [100/250], Loss: 0.7134058475494385\n", - "Epoch [10/10], Step [110/250], Loss: 0.6768066883087158\n", - "Epoch [10/10], Step [120/250], Loss: 0.6304113268852234\n", - "Epoch [10/10], Step [130/250], Loss: 0.7426990866661072\n", - "Epoch [10/10], Step [140/250], Loss: 0.7469097971916199\n", - "Epoch [10/10], Step [150/250], Loss: 0.7591947913169861\n", - "Epoch [10/10], Step [160/250], Loss: 0.7327935099601746\n", - "Epoch [10/10], Step [170/250], Loss: 
0.8590223789215088\n", - "Epoch [10/10], Step [180/250], Loss: 0.6994909644126892\n", - "Epoch [10/10], Step [190/250], Loss: 0.8262240886688232\n", - "Epoch [10/10], Step [200/250], Loss: 0.6071692109107971\n", - "Epoch [10/10], Step [210/250], Loss: 0.915013313293457\n", - "Epoch [10/10], Step [220/250], Loss: 0.8758894205093384\n", - "Epoch [10/10], Step [230/250], Loss: 0.6473208665847778\n", - "Epoch [10/10], Step [240/250], Loss: 0.6843898296356201\n", - "Epoch [10/10], Step [250/250], Loss: 0.6645953059196472\n", - "\n", - "Training job (train1u1it512gqg) succeeded, you can check the logs/metrics/output in the console:\n", - "https://pai.console.aliyun.com/?regionId=cn-hangzhou&workspaceId=58670#/training/jobs/train1u1it512gqg\n" - ] - } - ], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "epochs = 10\n", - "\n", - "\n", - "# 训练作业默认会挂载一个OSS Bucket路径到 /ml/output/checkpoints/\n", - "est = Estimator(\n", - " command=\"python train.py --epochs {}\".format(epochs),\n", - " source_dir=\"./train_src/\",\n", - " image_uri=retrieve(\"PyTorch\", \"latest\").image_uri,\n", - " instance_type=\"ecs.c6.large\",\n", - " base_job_name=\"torch_checkpoint\",\n", - ")\n", - "\n", - "est.fit()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 训练作业的checkpoints目录\n", - "print(est.checkpoints_data())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "以上训练作业对训练数据做了10次迭代,通过使用checkpoint,我们可以在原先模型的基础上继续训练,例如使用训练数据继续迭代20次迭代。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "1465353ea22d4b9a86f7b5b892f23471", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Uploading file: 
/var/folders/hc/5w4bg25j1ns2mm0yb06zzzbh0000gp/T/tmpshzpdx_z/source.tar.gz: 0%| | 0…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "View the job detail by accessing the console URI: https://pai.console.aliyun.com/?regionId=cn-hangzhou&workspaceId=58670#/training/jobs/trainu90lc57j1vm\n", - "TrainingJob launch starting\n", - "MAX_PARALLELISM=0\n", - "C_INCLUDE_PATH=/home/pai/include\n", - "KUBERNETES_SERVICE_PORT=443\n", - "KUBERNETES_PORT=tcp://10.192.0.1:443\n", - "LANGUAGE=en_US.UTF-8\n", - "PIP_TRUSTED_HOST=mirrors.cloud.aliyuncs.com\n", - "MASTER_ADDR=trainu90lc57j1vm-master-0\n", - "HOSTNAME=trainu90lc57j1vm-master-0\n", - "LD_LIBRARY_PATH=:/lib/x86_64-linux-gnu:/home/pai/lib:/home/pai/jre/lib/amd64/server\n", - "MASTER_PORT=23456\n", - "HOME=/root\n", - "PAI_USER_ARGS=\n", - "PYTHONUNBUFFERED=0\n", - "PAI_OUTPUT_CHECKPOINTS=/ml/output/checkpoints/\n", - "PAI_CONFIG_DIR=/ml/input/config/\n", - "WORLD_SIZE=1\n", - "REGION_ID=cn-hangzhou\n", - "CPLUS_INCLUDE_PATH=/home/pai/include\n", - "RANK=0\n", - "OPAL_PREFIX=/home/pai/\n", - "PAI_TRAINING_JOB_ID=trainu90lc57j1vm\n", - "TERM=xterm-color\n", - "KUBERNETES_PORT_443_TCP_ADDR=10.192.0.1\n", - "PAI_OUTPUT_MODEL=/ml/output/model/\n", - "ELASTIC_TRAINING_ENABLED=false\n", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/pai/bin:/home/pai/hadoop/bin\n", - "PIP_INDEX_URL=https://mirrors.cloud.aliyuncs.com/pypi/simple\n", - "KUBERNETES_PORT_443_TCP_PORT=443\n", - "KUBERNETES_PORT_443_TCP_PROTO=tcp\n", - "LANG=en_US.UTF-8\n", - "PAI_TRAINING_USE_ECI=true\n", - "aliyun_logs_containerType_tags=containerType=Algorithm\n", - "KUBERNETES_PORT_443_TCP=tcp://10.192.0.1:443\n", - "KUBERNETES_SERVICE_PORT_HTTPS=443\n", - "ELASTIC_INFERENCE_ENABLED=false\n", - "LC_ALL=en_US.UTF-8\n", - "JAVA_HOME=/home/pai\n", - "KUBERNETES_SERVICE_HOST=10.192.0.1\n", - "PWD=/\n", - "PAI_HPS={}\n", - "TZ=UTC\n", - 
"HADOOP_HOME=/home/pai/hadoop\n", - "PAI_OUTPUT_LOGS=/ml/output/logs/\n", - "aliyun_logs_trainingJobId_tags=trainingJobId=trainu90lc57j1vm\n", - "PAI_ODPS_CREDENTIAL=/ml/input/credential/odps.json\n", - "PAI_WORKING_DIR=/ml/usercode/\n", - "Change to Working Directory, /ml/usercode/\n", - "User program launching\n", - "-----------------------------------------------------------------\n", - "30\n", - "Epoch [11/30], Step [10/250], Loss: 0.678845226764679\n", - "Epoch [11/30], Step [20/250], Loss: 0.6292213201522827\n", - "Epoch [11/30], Step [30/250], Loss: 0.6856911182403564\n", - "Epoch [11/30], Step [40/250], Loss: 0.6147192716598511\n", - "Epoch [11/30], Step [50/250], Loss: 0.7846511602401733\n", - "Epoch [11/30], Step [60/250], Loss: 0.6719473004341125\n", - "Epoch [11/30], Step [70/250], Loss: 0.8227031826972961\n", - "Epoch [11/30], Step [80/250], Loss: 0.7861220836639404\n", - "Epoch [11/30], Step [90/250], Loss: 0.7436649203300476\n", - "Epoch [11/30], Step [100/250], Loss: 0.8053247928619385\n", - "Epoch [11/30], Step [110/250], Loss: 0.716484546661377\n", - "Epoch [11/30], Step [120/250], Loss: 0.6527263522148132\n", - "Epoch [11/30], Step [130/250], Loss: 0.7980918884277344\n", - "Epoch [11/30], Step [140/250], Loss: 0.6761615872383118\n", - "Epoch [11/30], Step [150/250], Loss: 0.8030520081520081\n", - "Epoch [11/30], Step [160/250], Loss: 0.6580255627632141\n", - "Epoch [11/30], Step [170/250], Loss: 0.7671869993209839\n", - "Epoch [11/30], Step [180/250], Loss: 0.6622000932693481\n", - "Epoch [11/30], Step [190/250], Loss: 0.747247576713562\n", - "Epoch [11/30], Step [200/250], Loss: 0.705307126045227\n", - "Epoch [11/30], Step [210/250], Loss: 0.6516950130462646\n", - "Epoch [11/30], Step [220/250], Loss: 0.6065223217010498\n", - "Epoch [11/30], Step [230/250], Loss: 0.6885045766830444\n", - "Epoch [11/30], Step [240/250], Loss: 0.7392936944961548\n", - "Epoch [11/30], Step [250/250], Loss: 0.6803852319717407\n", - "Epoch [12/30], Step [10/250], 
Loss: 0.8813486695289612\n", - "Epoch [12/30], Step [20/250], Loss: 0.7780698537826538\n", - "Epoch [12/30], Step [30/250], Loss: 0.7158650159835815\n", - "Epoch [12/30], Step [40/250], Loss: 0.5826153755187988\n", - "Epoch [12/30], Step [50/250], Loss: 0.6013429760932922\n", - "Epoch [12/30], Step [60/250], Loss: 0.7084614634513855\n", - "Epoch [12/30], Step [70/250], Loss: 0.6825753450393677\n", - "Epoch [12/30], Step [80/250], Loss: 0.6074261665344238\n", - "Epoch [12/30], Step [90/250], Loss: 0.8619674444198608\n", - "Epoch [12/30], Step [100/250], Loss: 0.6013283729553223\n", - "Epoch [12/30], Step [110/250], Loss: 0.6808617115020752\n", - "Epoch [12/30], Step [120/250], Loss: 0.6765388250350952\n", - "Epoch [12/30], Step [130/250], Loss: 0.7072106599807739\n", - "Epoch [12/30], Step [140/250], Loss: 0.6905199289321899\n", - "Epoch [12/30], Step [150/250], Loss: 0.6942532062530518\n", - "Epoch [12/30], Step [160/250], Loss: 0.7181805968284607\n", - "Epoch [12/30], Step [170/250], Loss: 0.6357207298278809\n", - "Epoch [12/30], Step [180/250], Loss: 0.6719130277633667\n", - "Epoch [12/30], Step [190/250], Loss: 0.7218160629272461\n", - "Epoch [12/30], Step [200/250], Loss: 0.7158771753311157\n", - "Epoch [12/30], Step [210/250], Loss: 0.7585588693618774\n", - "Epoch [12/30], Step [220/250], Loss: 0.8121419548988342\n", - "Epoch [12/30], Step [230/250], Loss: 0.7744668126106262\n", - "Epoch [12/30], Step [240/250], Loss: 0.7164073586463928\n", - "Epoch [12/30], Step [250/250], Loss: 0.5488151907920837\n", - "Epoch [13/30], Step [10/250], Loss: 0.7662173509597778\n", - "Epoch [13/30], Step [20/250], Loss: 0.7802825570106506\n", - "Epoch [13/30], Step [30/250], Loss: 0.7456352114677429\n", - "Epoch [13/30], Step [40/250], Loss: 0.6143842935562134\n", - "Epoch [13/30], Step [50/250], Loss: 0.7393404245376587\n", - "Epoch [13/30], Step [60/250], Loss: 0.6536136865615845\n", - "Epoch [13/30], Step [70/250], Loss: 0.7647539377212524\n", - "Epoch [13/30], Step [80/250], 
Loss: 0.6415259838104248\n", - "Epoch [13/30], Step [90/250], Loss: 0.8065975904464722\n", - "Epoch [13/30], Step [100/250], Loss: 0.654565155506134\n", - "Epoch [13/30], Step [110/250], Loss: 0.6512014865875244\n", - "Epoch [13/30], Step [120/250], Loss: 0.6851429343223572\n", - "Epoch [13/30], Step [130/250], Loss: 0.7639355659484863\n", - "Epoch [13/30], Step [140/250], Loss: 0.7886079549789429\n", - "Epoch [13/30], Step [150/250], Loss: 0.677024245262146\n", - "Epoch [13/30], Step [160/250], Loss: 0.6869807243347168\n", - "Epoch [13/30], Step [170/250], Loss: 0.7076682448387146\n", - "Epoch [13/30], Step [180/250], Loss: 0.6720783710479736\n", - "Epoch [13/30], Step [190/250], Loss: 0.6578226685523987\n", - "Epoch [13/30], Step [200/250], Loss: 0.6924010515213013\n", - "Epoch [13/30], Step [210/250], Loss: 0.8084946870803833\n", - "Epoch [13/30], Step [220/250], Loss: 0.7015032768249512\n", - "Epoch [13/30], Step [230/250], Loss: 0.6897311210632324\n", - "Epoch [13/30], Step [240/250], Loss: 0.7233715653419495\n", - "Epoch [13/30], Step [250/250], Loss: 0.82469242811203\n", - "Epoch [14/30], Step [10/250], Loss: 0.7118442058563232\n", - "Epoch [14/30], Step [20/250], Loss: 0.66881263256073\n", - "Epoch [14/30], Step [30/250], Loss: 0.6966590881347656\n", - "Epoch [14/30], Step [40/250], Loss: 0.8390185236930847\n", - "Epoch [14/30], Step [50/250], Loss: 0.7978378534317017\n", - "Epoch [14/30], Step [60/250], Loss: 0.6207278966903687\n", - "Epoch [14/30], Step [70/250], Loss: 0.6512827277183533\n", - "Epoch [14/30], Step [80/250], Loss: 0.6850301027297974\n", - "Epoch [14/30], Step [90/250], Loss: 0.628646194934845\n", - "Epoch [14/30], Step [100/250], Loss: 0.6093996167182922\n", - "Epoch [14/30], Step [110/250], Loss: 0.7588788866996765\n", - "Epoch [14/30], Step [120/250], Loss: 0.6795099377632141\n", - "Epoch [14/30], Step [130/250], Loss: 0.6357916593551636\n", - "Epoch [14/30], Step [140/250], Loss: 0.7358158826828003\n", - "Epoch [14/30], Step [150/250], 
Loss: 0.6896149516105652\n", - "Epoch [14/30], Step [160/250], Loss: 0.6862155199050903\n", - "Epoch [14/30], Step [170/250], Loss: 0.659408688545227\n", - "Epoch [14/30], Step [180/250], Loss: 0.717597246170044\n", - "Epoch [14/30], Step [190/250], Loss: 0.6779205203056335\n", - "Epoch [14/30], Step [200/250], Loss: 0.6569654941558838\n", - "Epoch [14/30], Step [210/250], Loss: 0.6521044373512268\n", - "Epoch [14/30], Step [220/250], Loss: 0.5803452134132385\n", - "Epoch [14/30], Step [230/250], Loss: 0.6112836599349976\n", - "Epoch [14/30], Step [240/250], Loss: 0.6311125755310059\n", - "Epoch [14/30], Step [250/250], Loss: 0.6427040696144104\n", - "Epoch [15/30], Step [10/250], Loss: 0.7193827629089355\n", - "Epoch [15/30], Step [20/250], Loss: 0.6781796216964722\n", - "Epoch [15/30], Step [30/250], Loss: 0.7042354345321655\n", - "Epoch [15/30], Step [40/250], Loss: 0.6776638627052307\n", - "Epoch [15/30], Step [50/250], Loss: 0.6593765020370483\n", - "Epoch [15/30], Step [60/250], Loss: 0.6749820113182068\n", - "Epoch [15/30], Step [70/250], Loss: 0.6199281811714172\n", - "Epoch [15/30], Step [80/250], Loss: 0.6898410320281982\n", - "Epoch [15/30], Step [90/250], Loss: 0.6938673257827759\n", - "Epoch [15/30], Step [100/250], Loss: 0.6369883418083191\n", - "Epoch [15/30], Step [110/250], Loss: 0.6758348345756531\n", - "Epoch [15/30], Step [120/250], Loss: 0.7379288673400879\n", - "Epoch [15/30], Step [130/250], Loss: 0.6447997689247131\n", - "Epoch [15/30], Step [140/250], Loss: 0.6910532712936401\n", - "Epoch [15/30], Step [150/250], Loss: 0.7426170110702515\n", - "Epoch [15/30], Step [160/250], Loss: 0.6422319412231445\n", - "Epoch [15/30], Step [170/250], Loss: 0.5789802670478821\n", - "Epoch [15/30], Step [180/250], Loss: 0.7434327602386475\n", - "Epoch [15/30], Step [190/250], Loss: 0.6754781007766724\n", - "Epoch [15/30], Step [200/250], Loss: 0.5865523815155029\n", - "Epoch [15/30], Step [210/250], Loss: 0.6548283696174622\n", - "Epoch [15/30], Step 
[220/250], Loss: 0.7495550513267517\n", - "Epoch [15/30], Step [230/250], Loss: 0.6538060903549194\n", - "Epoch [15/30], Step [240/250], Loss: 0.7314434051513672\n", - "Epoch [15/30], Step [250/250], Loss: 0.7135218381881714\n", - "Epoch [16/30], Step [10/250], Loss: 0.7383496761322021\n", - "Epoch [16/30], Step [20/250], Loss: 0.644036591053009\n", - "Epoch [16/30], Step [30/250], Loss: 0.6101108193397522\n", - "Epoch [16/30], Step [40/250], Loss: 0.7390760779380798\n", - "Epoch [16/30], Step [50/250], Loss: 0.6870918273925781\n", - "Epoch [16/30], Step [60/250], Loss: 0.6894906759262085\n", - "Epoch [16/30], Step [70/250], Loss: 0.7674188017845154\n", - "Epoch [16/30], Step [80/250], Loss: 0.7476275563240051\n", - "Epoch [16/30], Step [90/250], Loss: 0.7009009718894958\n", - "Epoch [16/30], Step [100/250], Loss: 0.6951045989990234\n", - "Epoch [16/30], Step [110/250], Loss: 0.7023512721061707\n", - "Epoch [16/30], Step [120/250], Loss: 0.6900476217269897\n", - "Epoch [16/30], Step [130/250], Loss: 0.7070642709732056\n", - "Epoch [16/30], Step [140/250], Loss: 0.6627304553985596\n", - "Epoch [16/30], Step [150/250], Loss: 0.676548182964325\n", - "Epoch [16/30], Step [160/250], Loss: 0.7038763761520386\n", - "Epoch [16/30], Step [170/250], Loss: 0.6916297078132629\n", - "Epoch [16/30], Step [180/250], Loss: 0.7028259634971619\n", - "Epoch [16/30], Step [190/250], Loss: 0.6524210572242737\n", - "Epoch [16/30], Step [200/250], Loss: 0.7346513867378235\n", - "Epoch [16/30], Step [210/250], Loss: 0.612514317035675\n", - "Epoch [16/30], Step [220/250], Loss: 0.7455917596817017\n", - "Epoch [16/30], Step [230/250], Loss: 0.747292160987854\n", - "Epoch [16/30], Step [240/250], Loss: 0.7447240352630615\n", - "Epoch [16/30], Step [250/250], Loss: 0.6769564747810364\n", - "Epoch [17/30], Step [10/250], Loss: 0.7425077557563782\n", - "Epoch [17/30], Step [20/250], Loss: 0.6944329738616943\n", - "Epoch [17/30], Step [30/250], Loss: 0.6961978673934937\n", - "Epoch [17/30], Step 
[40/250], Loss: 0.6465986967086792\n", - "Epoch [17/30], Step [50/250], Loss: 0.714703381061554\n", - "Epoch [17/30], Step [60/250], Loss: 0.5930614471435547\n", - "Epoch [17/30], Step [70/250], Loss: 0.6468428373336792\n", - "Epoch [17/30], Step [80/250], Loss: 0.686537504196167\n", - "Epoch [17/30], Step [90/250], Loss: 0.7371711730957031\n", - "Epoch [17/30], Step [100/250], Loss: 0.7700399160385132\n", - "Epoch [17/30], Step [110/250], Loss: 0.7529278993606567\n", - "Epoch [17/30], Step [120/250], Loss: 0.7036042213439941\n", - "Epoch [17/30], Step [130/250], Loss: 0.7871543765068054\n", - "Epoch [17/30], Step [140/250], Loss: 0.6956086158752441\n", - "Epoch [17/30], Step [150/250], Loss: 0.7426921725273132\n", - "Epoch [17/30], Step [160/250], Loss: 0.7222756743431091\n", - "Epoch [17/30], Step [170/250], Loss: 0.6826121807098389\n", - "Epoch [17/30], Step [180/250], Loss: 0.6970388293266296\n", - "Epoch [17/30], Step [190/250], Loss: 0.7087472677230835\n", - "Epoch [17/30], Step [200/250], Loss: 0.6320711374282837\n", - "Epoch [17/30], Step [210/250], Loss: 0.7280303835868835\n", - "Epoch [17/30], Step [220/250], Loss: 0.6934517621994019\n", - "Epoch [17/30], Step [230/250], Loss: 0.7071420550346375\n", - "Epoch [17/30], Step [240/250], Loss: 0.6856362223625183\n", - "Epoch [17/30], Step [250/250], Loss: 0.6945990324020386\n", - "Epoch [18/30], Step [10/250], Loss: 0.6465855240821838\n", - "Epoch [18/30], Step [20/250], Loss: 0.7086865901947021\n", - "Epoch [18/30], Step [30/250], Loss: 0.6256162524223328\n", - "Epoch [18/30], Step [40/250], Loss: 0.6532611846923828\n", - "Epoch [18/30], Step [50/250], Loss: 0.6484596729278564\n", - "Epoch [18/30], Step [60/250], Loss: 0.6955176591873169\n", - "Epoch [18/30], Step [70/250], Loss: 0.6615030765533447\n", - "Epoch [18/30], Step [80/250], Loss: 0.7038217186927795\n", - "Epoch [18/30], Step [90/250], Loss: 0.6943345069885254\n", - "Epoch [18/30], Step [100/250], Loss: 0.7004052996635437\n", - "Epoch [18/30], Step 
[110/250], Loss: 0.7458634972572327\n", - "Epoch [18/30], Step [120/250], Loss: 0.6851629614830017\n", - "Epoch [18/30], Step [130/250], Loss: 0.682853102684021\n", - "Epoch [18/30], Step [140/250], Loss: 0.6481672525405884\n", - "Epoch [18/30], Step [150/250], Loss: 0.7038549780845642\n", - "Epoch [18/30], Step [160/250], Loss: 0.6995554566383362\n", - "Epoch [18/30], Step [170/250], Loss: 0.6800370216369629\n", - "Epoch [18/30], Step [180/250], Loss: 0.6488386392593384\n", - "Epoch [18/30], Step [190/250], Loss: 0.7000787854194641\n", - "Epoch [18/30], Step [200/250], Loss: 0.7428950071334839\n", - "Epoch [18/30], Step [210/250], Loss: 0.6872988343238831\n", - "Epoch [18/30], Step [220/250], Loss: 0.6482336521148682\n", - "Epoch [18/30], Step [230/250], Loss: 0.6626957058906555\n", - "Epoch [18/30], Step [240/250], Loss: 0.6778802275657654\n", - "Epoch [18/30], Step [250/250], Loss: 0.7027387022972107\n", - "Epoch [19/30], Step [10/250], Loss: 0.6812503933906555\n", - "Epoch [19/30], Step [20/250], Loss: 0.6751934289932251\n", - "Epoch [19/30], Step [30/250], Loss: 0.6624279618263245\n", - "Epoch [19/30], Step [40/250], Loss: 0.6787773966789246\n", - "Epoch [19/30], Step [50/250], Loss: 0.7765601873397827\n", - "Epoch [19/30], Step [60/250], Loss: 0.6592363119125366\n", - "Epoch [19/30], Step [70/250], Loss: 0.7038179039955139\n", - "Epoch [19/30], Step [80/250], Loss: 0.7358537316322327\n", - "Epoch [19/30], Step [90/250], Loss: 0.708828330039978\n", - "Epoch [19/30], Step [100/250], Loss: 0.7642552852630615\n", - "Epoch [19/30], Step [110/250], Loss: 0.7605912089347839\n", - "Epoch [19/30], Step [120/250], Loss: 0.6976773738861084\n", - "Epoch [19/30], Step [130/250], Loss: 0.6766220331192017\n", - "Epoch [19/30], Step [140/250], Loss: 0.7171740531921387\n", - "Epoch [19/30], Step [150/250], Loss: 0.6521143913269043\n", - "Epoch [19/30], Step [160/250], Loss: 0.6554864645004272\n", - "Epoch [19/30], Step [170/250], Loss: 0.6797289848327637\n", - "Epoch [19/30], 
Step [180/250], Loss: 0.6546230316162109\n", - "Epoch [19/30], Step [190/250], Loss: 0.6951708197593689\n", - "Epoch [19/30], Step [200/250], Loss: 0.7692861557006836\n", - "Epoch [19/30], Step [210/250], Loss: 0.6987319588661194\n", - "Epoch [19/30], Step [220/250], Loss: 0.7281709909439087\n", - "Epoch [19/30], Step [230/250], Loss: 0.6981549263000488\n", - "Epoch [19/30], Step [240/250], Loss: 0.6613932847976685\n", - "Epoch [19/30], Step [250/250], Loss: 0.6515719890594482\n", - "Epoch [20/30], Step [10/250], Loss: 0.683667004108429\n", - "Epoch [20/30], Step [20/250], Loss: 0.6330690383911133\n", - "Epoch [20/30], Step [30/250], Loss: 0.6992578506469727\n", - "Epoch [20/30], Step [40/250], Loss: 0.7081963419914246\n", - "Epoch [20/30], Step [50/250], Loss: 0.7147829532623291\n", - "Epoch [20/30], Step [60/250], Loss: 0.6547238826751709\n", - "Epoch [20/30], Step [70/250], Loss: 0.627391517162323\n", - "Epoch [20/30], Step [80/250], Loss: 0.6972628831863403\n", - "Epoch [20/30], Step [90/250], Loss: 0.6500757932662964\n", - "Epoch [20/30], Step [100/250], Loss: 0.7282431125640869\n", - "Epoch [20/30], Step [110/250], Loss: 0.6599644422531128\n", - "Epoch [20/30], Step [120/250], Loss: 0.691277265548706\n", - "Epoch [20/30], Step [130/250], Loss: 0.6712023019790649\n", - "Epoch [20/30], Step [140/250], Loss: 0.6875613927841187\n", - "Epoch [20/30], Step [150/250], Loss: 0.6852554082870483\n", - "Epoch [20/30], Step [160/250], Loss: 0.7059615850448608\n", - "Epoch [20/30], Step [170/250], Loss: 0.7474350333213806\n", - "Epoch [20/30], Step [180/250], Loss: 0.6700282096862793\n", - "Epoch [20/30], Step [190/250], Loss: 0.7267058491706848\n", - "Epoch [20/30], Step [200/250], Loss: 0.6795942783355713\n", - "Epoch [20/30], Step [210/250], Loss: 0.7355214953422546\n", - "Epoch [20/30], Step [220/250], Loss: 0.7097989320755005\n", - "Epoch [20/30], Step [230/250], Loss: 0.6741981506347656\n", - "Epoch [20/30], Step [240/250], Loss: 0.7197920680046082\n", - "Epoch 
[20/30], Step [250/250], Loss: 0.6666856408119202\n", - "Epoch [21/30], Step [10/250], Loss: 0.6850540637969971\n", - "Epoch [21/30], Step [20/250], Loss: 0.6577891111373901\n", - "Epoch [21/30], Step [30/250], Loss: 0.7145082354545593\n", - "Epoch [21/30], Step [40/250], Loss: 0.6782787442207336\n", - "Epoch [21/30], Step [50/250], Loss: 0.7092875242233276\n", - "Epoch [21/30], Step [60/250], Loss: 0.6552045941352844\n", - "Epoch [21/30], Step [70/250], Loss: 0.665422260761261\n", - "Epoch [21/30], Step [80/250], Loss: 0.7131606340408325\n", - "Epoch [21/30], Step [90/250], Loss: 0.6851215362548828\n", - "Epoch [21/30], Step [100/250], Loss: 0.7093809843063354\n", - "Epoch [21/30], Step [110/250], Loss: 0.6839103698730469\n", - "Epoch [21/30], Step [120/250], Loss: 0.6863808035850525\n", - "Epoch [21/30], Step [130/250], Loss: 0.6923962831497192\n", - "Epoch [21/30], Step [140/250], Loss: 0.7143585085868835\n", - "Epoch [21/30], Step [150/250], Loss: 0.7165741324424744\n", - "Epoch [21/30], Step [160/250], Loss: 0.7011140584945679\n", - "Epoch [21/30], Step [170/250], Loss: 0.7145777344703674\n", - "Epoch [21/30], Step [180/250], Loss: 0.6781455278396606\n", - "Epoch [21/30], Step [190/250], Loss: 0.704175591468811\n", - "Epoch [21/30], Step [200/250], Loss: 0.6643280982971191\n", - "Epoch [21/30], Step [210/250], Loss: 0.7143128514289856\n", - "Epoch [21/30], Step [220/250], Loss: 0.7122169137001038\n", - "Epoch [21/30], Step [230/250], Loss: 0.7329443693161011\n", - "Epoch [21/30], Step [240/250], Loss: 0.7038950324058533\n", - "Epoch [21/30], Step [250/250], Loss: 0.683397114276886\n", - "Epoch [22/30], Step [10/250], Loss: 0.6960069537162781\n", - "Epoch [22/30], Step [20/250], Loss: 0.6595947742462158\n", - "Epoch [22/30], Step [30/250], Loss: 0.7287018895149231\n", - "Epoch [22/30], Step [40/250], Loss: 0.7046036720275879\n", - "Epoch [22/30], Step [50/250], Loss: 0.7062811255455017\n", - "Epoch [22/30], Step [60/250], Loss: 0.7442296743392944\n", - "Epoch 
[22/30], Step [70/250], Loss: 0.6482053399085999\n", - "Epoch [22/30], Step [80/250], Loss: 0.722833514213562\n", - "Epoch [22/30], Step [90/250], Loss: 0.6747336387634277\n", - "Epoch [22/30], Step [100/250], Loss: 0.7139792442321777\n", - "Epoch [22/30], Step [110/250], Loss: 0.680081844329834\n", - "Epoch [22/30], Step [120/250], Loss: 0.686549186706543\n", - "Epoch [22/30], Step [130/250], Loss: 0.6854720115661621\n", - "Epoch [22/30], Step [140/250], Loss: 0.6525530815124512\n", - "Epoch [22/30], Step [150/250], Loss: 0.6676555871963501\n", - "Epoch [22/30], Step [160/250], Loss: 0.7014628052711487\n", - "Epoch [22/30], Step [170/250], Loss: 0.7186480760574341\n", - "Epoch [22/30], Step [180/250], Loss: 0.6748342514038086\n", - "Epoch [22/30], Step [190/250], Loss: 0.7034397125244141\n", - "Epoch [22/30], Step [200/250], Loss: 0.6637327075004578\n", - "Epoch [22/30], Step [210/250], Loss: 0.6852638125419617\n", - "Epoch [22/30], Step [220/250], Loss: 0.6631066203117371\n", - "Epoch [22/30], Step [230/250], Loss: 0.7248471975326538\n", - "Epoch [22/30], Step [240/250], Loss: 0.7282781004905701\n", - "Epoch [22/30], Step [250/250], Loss: 0.678613007068634\n", - "Epoch [23/30], Step [10/250], Loss: 0.6844161748886108\n", - "Epoch [23/30], Step [20/250], Loss: 0.6881325244903564\n", - "Epoch [23/30], Step [30/250], Loss: 0.6631232500076294\n", - "Epoch [23/30], Step [40/250], Loss: 0.7202731370925903\n", - "Epoch [23/30], Step [50/250], Loss: 0.6977999210357666\n", - "Epoch [23/30], Step [60/250], Loss: 0.7103397846221924\n", - "Epoch [23/30], Step [70/250], Loss: 0.6726264953613281\n", - "Epoch [23/30], Step [80/250], Loss: 0.6642501354217529\n", - "Epoch [23/30], Step [90/250], Loss: 0.7357184886932373\n", - "Epoch [23/30], Step [100/250], Loss: 0.7160366773605347\n", - "Epoch [23/30], Step [110/250], Loss: 0.6603021621704102\n", - "Epoch [23/30], Step [120/250], Loss: 0.6760040521621704\n", - "Epoch [23/30], Step [130/250], Loss: 0.696141242980957\n", - "Epoch 
[23/30], Step [140/250], Loss: 0.6645365357398987\n", - "Epoch [23/30], Step [150/250], Loss: 0.7011918425559998\n", - "Epoch [23/30], Step [160/250], Loss: 0.6758050322532654\n", - "Epoch [23/30], Step [170/250], Loss: 0.6683043837547302\n", - "Epoch [23/30], Step [180/250], Loss: 0.6827936172485352\n", - "Epoch [23/30], Step [190/250], Loss: 0.699557900428772\n", - "Epoch [23/30], Step [200/250], Loss: 0.6873543858528137\n", - "Epoch [23/30], Step [210/250], Loss: 0.6973046064376831\n", - "Epoch [23/30], Step [220/250], Loss: 0.6847941279411316\n", - "Epoch [23/30], Step [230/250], Loss: 0.686026930809021\n", - "Epoch [23/30], Step [240/250], Loss: 0.712138831615448\n", - "Epoch [23/30], Step [250/250], Loss: 0.6938803791999817\n", - "Epoch [24/30], Step [10/250], Loss: 0.6833834648132324\n", - "Epoch [24/30], Step [20/250], Loss: 0.7029370069503784\n", - "Epoch [24/30], Step [30/250], Loss: 0.6896952390670776\n", - "Epoch [24/30], Step [40/250], Loss: 0.6966062784194946\n", - "Epoch [24/30], Step [50/250], Loss: 0.6755800247192383\n", - "Epoch [24/30], Step [60/250], Loss: 0.6890952587127686\n", - "Epoch [24/30], Step [70/250], Loss: 0.6705589294433594\n", - "Epoch [24/30], Step [80/250], Loss: 0.7066176533699036\n", - "Epoch [24/30], Step [90/250], Loss: 0.758873701095581\n", - "Epoch [24/30], Step [100/250], Loss: 0.699566125869751\n", - "Epoch [24/30], Step [110/250], Loss: 0.7008506059646606\n", - "Epoch [24/30], Step [120/250], Loss: 0.686880350112915\n", - "Epoch [24/30], Step [130/250], Loss: 0.6831185817718506\n", - "Epoch [24/30], Step [140/250], Loss: 0.6989403963088989\n", - "Epoch [24/30], Step [150/250], Loss: 0.7022895812988281\n", - "Epoch [24/30], Step [160/250], Loss: 0.7047298550605774\n", - "Epoch [24/30], Step [170/250], Loss: 0.6803637742996216\n", - "Epoch [24/30], Step [180/250], Loss: 0.6698098182678223\n", - "Epoch [24/30], Step [190/250], Loss: 0.6965357661247253\n", - "Epoch [24/30], Step [200/250], Loss: 0.7183314561843872\n", - 
"Epoch [24/30], Step [210/250], Loss: 0.7083855271339417\n", - "Epoch [24/30], Step [220/250], Loss: 0.688880205154419\n", - "Epoch [24/30], Step [230/250], Loss: 0.6859614253044128\n", - "Epoch [24/30], Step [240/250], Loss: 0.6815621852874756\n", - "Epoch [24/30], Step [250/250], Loss: 0.7023071050643921\n", - "Epoch [25/30], Step [10/250], Loss: 0.6979001760482788\n", - "Epoch [25/30], Step [20/250], Loss: 0.6792093515396118\n", - "Epoch [25/30], Step [30/250], Loss: 0.7000377178192139\n", - "Epoch [25/30], Step [40/250], Loss: 0.6891401410102844\n", - "Epoch [25/30], Step [50/250], Loss: 0.6950706839561462\n", - "Epoch [25/30], Step [60/250], Loss: 0.6931962966918945\n", - "Epoch [25/30], Step [70/250], Loss: 0.6918748021125793\n", - "Epoch [25/30], Step [80/250], Loss: 0.7022840976715088\n", - "Epoch [25/30], Step [90/250], Loss: 0.7233110666275024\n", - "Epoch [25/30], Step [100/250], Loss: 0.6882573366165161\n", - "Epoch [25/30], Step [110/250], Loss: 0.6959525346755981\n", - "Epoch [25/30], Step [120/250], Loss: 0.6953780651092529\n", - "Epoch [25/30], Step [130/250], Loss: 0.7029913067817688\n", - "Epoch [25/30], Step [140/250], Loss: 0.7104859948158264\n", - "Epoch [25/30], Step [150/250], Loss: 0.6983399391174316\n", - "Epoch [25/30], Step [160/250], Loss: 0.6920713186264038\n", - "Epoch [25/30], Step [170/250], Loss: 0.7179511189460754\n", - "Epoch [25/30], Step [180/250], Loss: 0.6971415281295776\n", - "Epoch [25/30], Step [190/250], Loss: 0.7037041783332825\n", - "Epoch [25/30], Step [200/250], Loss: 0.6952695846557617\n", - "Epoch [25/30], Step [210/250], Loss: 0.7007227540016174\n", - "Epoch [25/30], Step [220/250], Loss: 0.686070442199707\n", - "Epoch [25/30], Step [230/250], Loss: 0.692324161529541\n", - "Epoch [25/30], Step [240/250], Loss: 0.6936407089233398\n", - "Epoch [25/30], Step [250/250], Loss: 0.6896817088127136\n", - "Epoch [26/30], Step [10/250], Loss: 0.7085744142532349\n", - "Epoch [26/30], Step [20/250], Loss: 0.6863793730735779\n", 
- "Epoch [26/30], Step [30/250], Loss: 0.6817866563796997\n", - "Epoch [26/30], Step [40/250], Loss: 0.7037662267684937\n", - "Epoch [26/30], Step [50/250], Loss: 0.7046667337417603\n", - "Epoch [26/30], Step [60/250], Loss: 0.6918007135391235\n", - "Epoch [26/30], Step [70/250], Loss: 0.713044285774231\n", - "Epoch [26/30], Step [80/250], Loss: 0.6832862496376038\n", - "Epoch [26/30], Step [90/250], Loss: 0.667504608631134\n", - "Epoch [26/30], Step [100/250], Loss: 0.6760569214820862\n", - "Epoch [26/30], Step [110/250], Loss: 0.707482099533081\n", - "Epoch [26/30], Step [120/250], Loss: 0.6977518200874329\n", - "Epoch [26/30], Step [130/250], Loss: 0.6955530047416687\n", - "Epoch [26/30], Step [140/250], Loss: 0.7124805450439453\n", - "Epoch [26/30], Step [150/250], Loss: 0.6924611330032349\n", - "Epoch [26/30], Step [160/250], Loss: 0.6965060234069824\n", - "Epoch [26/30], Step [170/250], Loss: 0.6868378520011902\n", - "Epoch [26/30], Step [180/250], Loss: 0.7103825807571411\n", - "Epoch [26/30], Step [190/250], Loss: 0.6711806654930115\n", - "Epoch [26/30], Step [200/250], Loss: 0.6948347091674805\n", - "Epoch [26/30], Step [210/250], Loss: 0.7058894634246826\n", - "Epoch [26/30], Step [220/250], Loss: 0.6947336196899414\n", - "Epoch [26/30], Step [230/250], Loss: 0.689943253993988\n", - "Epoch [26/30], Step [240/250], Loss: 0.6956008672714233\n", - "Epoch [26/30], Step [250/250], Loss: 0.6892440319061279\n", - "Epoch [27/30], Step [10/250], Loss: 0.6945648193359375\n", - "Epoch [27/30], Step [20/250], Loss: 0.697243332862854\n", - "Epoch [27/30], Step [30/250], Loss: 0.6995589137077332\n", - "Epoch [27/30], Step [40/250], Loss: 0.6961522698402405\n", - "Epoch [27/30], Step [50/250], Loss: 0.7141368389129639\n", - "Epoch [27/30], Step [60/250], Loss: 0.6883167028427124\n", - "Epoch [27/30], Step [70/250], Loss: 0.681597888469696\n", - "Epoch [27/30], Step [80/250], Loss: 0.6933290362358093\n", - "Epoch [27/30], Step [90/250], Loss: 0.6990853548049927\n", - 
"Epoch [27/30], Step [100/250], Loss: 0.6930828094482422\n", - "Epoch [27/30], Step [110/250], Loss: 0.6889819502830505\n", - "Epoch [27/30], Step [120/250], Loss: 0.6966762542724609\n", - "Epoch [27/30], Step [130/250], Loss: 0.7014245986938477\n", - "Epoch [27/30], Step [140/250], Loss: 0.7081984281539917\n", - "Epoch [27/30], Step [150/250], Loss: 0.6894259452819824\n", - "Epoch [27/30], Step [160/250], Loss: 0.695622444152832\n", - "Epoch [27/30], Step [170/250], Loss: 0.6961721181869507\n", - "Epoch [27/30], Step [180/250], Loss: 0.6897941827774048\n", - "Epoch [27/30], Step [190/250], Loss: 0.6890014410018921\n", - "Epoch [27/30], Step [200/250], Loss: 0.6775841116905212\n", - "Epoch [27/30], Step [210/250], Loss: 0.6889995336532593\n", - "Epoch [27/30], Step [220/250], Loss: 0.6887487769126892\n", - "Epoch [27/30], Step [230/250], Loss: 0.6713950037956238\n", - "Epoch [27/30], Step [240/250], Loss: 0.6815714836120605\n", - "Epoch [27/30], Step [250/250], Loss: 0.6999087333679199\n", - "Epoch [28/30], Step [10/250], Loss: 0.7005322575569153\n", - "Epoch [28/30], Step [20/250], Loss: 0.6854400634765625\n", - "Epoch [28/30], Step [30/250], Loss: 0.7016850113868713\n", - "Epoch [28/30], Step [40/250], Loss: 0.6971641182899475\n", - "Epoch [28/30], Step [50/250], Loss: 0.6831482648849487\n", - "Epoch [28/30], Step [60/250], Loss: 0.6957387924194336\n", - "Epoch [28/30], Step [70/250], Loss: 0.6991732716560364\n", - "Epoch [28/30], Step [80/250], Loss: 0.6832884550094604\n", - "Epoch [28/30], Step [90/250], Loss: 0.6862078309059143\n", - "Epoch [28/30], Step [100/250], Loss: 0.7001485824584961\n", - "Epoch [28/30], Step [110/250], Loss: 0.686698317527771\n", - "Epoch [28/30], Step [120/250], Loss: 0.6935960054397583\n", - "Epoch [28/30], Step [130/250], Loss: 0.6797569990158081\n", - "Epoch [28/30], Step [140/250], Loss: 0.6913435459136963\n", - "Epoch [28/30], Step [150/250], Loss: 0.7099695205688477\n", - "Epoch [28/30], Step [160/250], Loss: 
0.6739814877510071\n", - "Epoch [28/30], Step [170/250], Loss: 0.691004753112793\n", - "Epoch [28/30], Step [180/250], Loss: 0.6871265172958374\n", - "Epoch [28/30], Step [190/250], Loss: 0.6769859790802002\n", - "Epoch [28/30], Step [200/250], Loss: 0.6753854751586914\n", - "Epoch [28/30], Step [210/250], Loss: 0.6798712015151978\n", - "Epoch [28/30], Step [220/250], Loss: 0.6959697008132935\n", - "Epoch [28/30], Step [230/250], Loss: 0.6912880539894104\n", - "Epoch [28/30], Step [240/250], Loss: 0.7011526823043823\n", - "Epoch [28/30], Step [250/250], Loss: 0.6955965757369995\n", - "Epoch [29/30], Step [10/250], Loss: 0.700312077999115\n", - "Epoch [29/30], Step [20/250], Loss: 0.688980758190155\n", - "Epoch [29/30], Step [30/250], Loss: 0.687660813331604\n", - "Epoch [29/30], Step [40/250], Loss: 0.6973135471343994\n", - "Epoch [29/30], Step [50/250], Loss: 0.7041200995445251\n", - "Epoch [29/30], Step [60/250], Loss: 0.6702690720558167\n", - "Epoch [29/30], Step [70/250], Loss: 0.695311427116394\n", - "Epoch [29/30], Step [80/250], Loss: 0.7089749574661255\n", - "Epoch [29/30], Step [90/250], Loss: 0.6968417763710022\n", - "Epoch [29/30], Step [100/250], Loss: 0.6854453086853027\n", - "Epoch [29/30], Step [110/250], Loss: 0.6853547096252441\n", - "Epoch [29/30], Step [120/250], Loss: 0.6865882277488708\n", - "Epoch [29/30], Step [130/250], Loss: 0.6883337497711182\n", - "Epoch [29/30], Step [140/250], Loss: 0.705528974533081\n", - "Epoch [29/30], Step [150/250], Loss: 0.6866053938865662\n", - "Epoch [29/30], Step [160/250], Loss: 0.6900249123573303\n", - "Epoch [29/30], Step [170/250], Loss: 0.6984312534332275\n", - "Epoch [29/30], Step [180/250], Loss: 0.7001223564147949\n", - "Epoch [29/30], Step [190/250], Loss: 0.6993950605392456\n", - "Epoch [29/30], Step [200/250], Loss: 0.6955195069313049\n", - "Epoch [29/30], Step [210/250], Loss: 0.7174205183982849\n", - "Epoch [29/30], Step [220/250], Loss: 0.6770732998847961\n", - "Epoch [29/30], Step [230/250], 
Loss: 0.6760091781616211\n", - "Epoch [29/30], Step [240/250], Loss: 0.6769121885299683\n", - "Epoch [29/30], Step [250/250], Loss: 0.7050588130950928\n", - "Epoch [30/30], Step [10/250], Loss: 0.6745777130126953\n", - "Epoch [30/30], Step [20/250], Loss: 0.6881678104400635\n", - "Epoch [30/30], Step [30/250], Loss: 0.6794246435165405\n", - "Epoch [30/30], Step [40/250], Loss: 0.7122002840042114\n", - "Epoch [30/30], Step [50/250], Loss: 0.698681116104126\n", - "Epoch [30/30], Step [60/250], Loss: 0.7196323871612549\n", - "Epoch [30/30], Step [70/250], Loss: 0.6916103363037109\n", - "Epoch [30/30], Step [80/250], Loss: 0.6879148483276367\n", - "Epoch [30/30], Step [90/250], Loss: 0.7075177431106567\n", - "Epoch [30/30], Step [100/250], Loss: 0.6686447858810425\n", - "Epoch [30/30], Step [110/250], Loss: 0.7030155062675476\n", - "Epoch [30/30], Step [120/250], Loss: 0.7014066576957703\n", - "Epoch [30/30], Step [130/250], Loss: 0.7121413946151733\n", - "Epoch [30/30], Step [140/250], Loss: 0.6912719011306763\n", - "Epoch [30/30], Step [150/250], Loss: 0.6733638048171997\n", - "Epoch [30/30], Step [160/250], Loss: 0.7193289399147034\n", - "Epoch [30/30], Step [170/250], Loss: 0.6880522966384888\n", - "Epoch [30/30], Step [180/250], Loss: 0.7069193720817566\n", - "Epoch [30/30], Step [190/250], Loss: 0.6976951360702515\n", - "Epoch [30/30], Step [200/250], Loss: 0.6925494074821472\n", - "Epoch [30/30], Step [210/250], Loss: 0.6907849907875061\n", - "Epoch [30/30], Step [220/250], Loss: 0.6824172735214233\n", - "Epoch [30/30], Step [230/250], Loss: 0.6865588426589966\n", - "Epoch [30/30], Step [240/250], Loss: 0.6921617984771729\n", - "Epoch [30/30], Step [250/250], Loss: 0.6736024618148804\n", - "\n", - "Training job (trainu90lc57j1vm) succeeded, you can check the logs/metrics/output in the console:\n", - "https://pai.console.aliyun.com/?regionId=cn-hangzhou&workspaceId=58670#/training/jobs/trainu90lc57j1vm\n" - ] - } - ], - "source": [ - "from pai.estimator import 
Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 训练数据的总迭代次数为30\n", - "epochs = 30\n", - "\n", - "resume_est = Estimator(\n", - " command=\"python train.py --epochs {}\".format(epochs),\n", - " source_dir=\"./train_src/\",\n", - " image_uri=retrieve(\"PyTorch\", \"latest\").image_uri,\n", - " instance_type=\"ecs.c6.large\",\n", - " # 使用上一个训练作业的checkpoints,相应的OSS Bucket路径会被挂载到 /ml/output/checkpoints 路径下\n", - " checkpoints_path=est.checkpoints_data(),\n", - " base_job_name=\"torch_resume_checkpoint\",\n", - ")\n", - "\n", - "resume_est.fit()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过训练作业日志的,我们可以看到训练作业加载了之前训练作业的checkpoint,在此基础上,从第11个epoch开始继续训练。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 结语\n", - "\n", - "本文以`PyTorch`为示例,介绍了如何在PAI的训练作业中使用`checkpoint`:训练代码可以通过`/ml/output/checkpoints/`路径保存和加载`checkpoints`文件,`checkpoints`文件将被保存到OSS Bucket上。当用户使用其他的训练框架,例如`TensorFlow`、`HuggingFace transformers`、`ModelScope`等,也可以通过类似的方式在PAI的训练作业中使用`checkpoint`。\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/framework.rst b/docs/source/tutorial/framework.rst deleted file mode 100644 index 48fbbb2..0000000 --- a/docs/source/tutorial/framework.rst +++ /dev/null @@ -1,17 +0,0 @@ -=========================================== -机器学习框架 -=========================================== - - - - -.. 
toctree:: - :maxdepth: 1 - :caption: 示例教程 - - - 训练和部署PyTorch模型 - 训练和部署XGBoost模型 - 训练和部署Tensorflow模型 - 基于HuggingFace BERT训练和部署文本分类模型 - 使用ModelScope ViT训练和部署图片分类模型 diff --git a/docs/source/tutorial/huggingface_bert/huggingface_bert.ipynb b/docs/source/tutorial/huggingface_bert/huggingface_bert.ipynb deleted file mode 100644 index cb90726..0000000 --- a/docs/source/tutorial/huggingface_bert/huggingface_bert.ipynb +++ /dev/null @@ -1,848 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "bb57c39e-16f6-4f84-b071-7751bd01b4c4", - "metadata": { - "ExecutionIndicator": { - "show": true - }, - "tags": [] - }, - "source": [ - "# HuggingFace BERT模型部署和微调训练\n", - "\n", - "[HuggingFace](https://huggingface.co/) 是一个开源开放的AI社区平台,允许用户共享自己的AI项目、数据集和模型,同时也为用户提供了各种机器学习工具,包括`transformers`、`diffusers`、`accelerate`等。通过HuggingFace社区,用户可以轻松地构建和训练自己的模型,并将其应用于各种实际场景中。\n", - "\n", - "当前文档中,我们以HuggingFace提供的[BERT预训练模型-英文-base](https://huggingface.co/bert-base-uncased)预训练模型为示例,展示如何在PAI微调训练和部署BERT模型,主要内容包括以下:\n", - "\n", - "1. SDK安装和配置:\n", - "\n", - "安装所需的SDK,并完成PAI Python SDK配置。\n", - "\n", - "2. 直接部署BERT模型创建推理服务\n", - "\n", - "将HuggingFace上的BERT模型直接模型部署到PAI-EAS,创建一个在线推理服务。\n", - "\n", - "3. 
使用BERT模型微调训练\n", - "\n", - "基于BERT模型,我们使用公共数据集进行微调训练,以获得一个可以用于情感分类的模型,然后将输出的模型部署到PAI-EAS,创建一个在线推理服务。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "73692608-6d3f-4551-9eeb-e169bfa93799", - "metadata": {}, - "source": [ - "## Step1: SDK的安装配置\n", - "\n", - "我们将使用PAI提供的Python SDK,提交训练作业,部署模型。请通过以下命令安装PAI Python SDK,以及需要使用到的Huggingface datasets等依赖库。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c09a58a3-7cf9-43ac-b386-3bafffbf6321", - "metadata": { - "ExecutionIndicator": { - "show": true - }, - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bee87d5", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install datasets huggingface_hub" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5212ae4f-cb05-45a0-82f1-3d1cd89be38b", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI Python SDK安装之后,通过在**命令行终端**中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在命令行终端中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过执行以下代码验证当前的配置是否成功。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "55bcb9aa-58ee-47a0-9656-446c5bf67845", - "metadata": { - "ExecutionIndicator": { - "show": true - }, - "tags": [] - }, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5952d3b9", - "metadata": {}, - "source": [ - "## Step2: 部署BERT模型创建推理服务\n", - "\n", - "\n", - "[PAI-EAS](https://www.aliyun.com/activity/bigdata/pai/eas) (Elastic Algorithm Service) 
是PAI平台上的模型在线预测服务,支持使用镜像模式部署模型,并且提供了常见的机器学习框架的推理镜像。 在以下示例中,我们将使用PAI-EAS提供的镜像,将HuggingFace上的BERT模型直接部署到PAI,创建一个在线推理服务。\n", - "\n", - "[BERT](https://arxiv.org/abs/1810.04805)是Google提出的一种预训练语言模型,使用自监督学习方法在大型英文语料库上进行训练。他可以直接用于\"完形填空\"的任务,也可以作为下游任务的预训练模型,通过微调训练,用于分类,问答等不同的任务。我们通过以下代码下载HuggingFace提供的BERT模型,用于创建一个支持“完形填空”的推理服务。\n", - "\n", - "> 对于如何在离线模式下保存和使用HuggingFace模型,用户可以参考HuggingFace的官方文档: [HuggingFace Offline Mode](https://huggingface.co/docs/transformers/installation#fetch-models-and-tokenizers-to-use-offline)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25ae41ce", - "metadata": {}, - "outputs": [], - "source": [ - "from huggingface_hub import snapshot_download\n", - "\n", - "\n", - "# 下载BERT模型(PyTorch版本)\n", - "model_dir = snapshot_download(\n", - " repo_id=\"bert-base-uncased\",\n", - " local_dir=\"./bert\",\n", - " allow_patterns=[\n", - " \"config.json\",\n", - " \"pytorch_model.bin\",\n", - " \"vocab.txt\",\n", - " \"tokenizer_config.json\",\n", - " \"tokenizer.json\",\n", - " ],\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "bf489d6a", - "metadata": {}, - "source": [ - "用户也可以通过以下的方式保存模型(需要用户在本地install`transformers`, `pytorch`等依赖库):\n", - "\n", - "```python\n", - "\n", - "from transformers import BertTokenizer, BertModel\n", - "\n", - "# 下载模型\n", - "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n", - "model = BertModel.from_pretrained(\"bert-base-uncased\")\n", - "\n", - "# 保存模型到本地路径\n", - "model_dir = \"./bert/\"\n", - "model.save_pretrained(model_dir)\n", - "tokenizer.save_pretrained(model_dir)\n", - "\n", - "```\n", - "\n", - "保存的模型,可以直接通过`transformers`库加载使用:\n", - "\n", - "```python\n", - "\n", - "from transformers import BertTokenizer, BertModel\n", - "\n", - "model = BertModel.from_pretrained(\"./bert/\")\n", - "tokenizer = BertTokenizer.from_pretrained(\"./bert/\")\n", - "\n", - "```\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "14acee39", - 
"metadata": {}, - "source": [ - "将保存在本地的BERT模型和tokenizer上传到OSS Bucket,拿到模型的OSS路径。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25debdca", - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "\n", - "# 上传模型\n", - "bert_model_uri = upload(\n", - " source_path=model_dir, oss_path=\"huggingface/model/bert/\", bucket=sess.oss_bucket\n", - ")\n", - "print(bert_model_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "088cf89b", - "metadata": {}, - "source": [ - "\n", - "在部署模型之前,我们需要准备模型推理服务的代码,用于加载模型,提供HTTP服务。在以下示例中,我们使用[FastAPI](https://fastapi.tiangolo.com/)编写了一个简单的HTTP服务,用于加载模型,提供预测服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a524a388", - "metadata": {}, - "outputs": [], - "source": [ - "# 创建推理服务使用的代码\n", - "!mkdir -p serving_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "b804c39c", - "metadata": {}, - "source": [ - "完整的推理服务程序代码如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d8cd70ff", - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile serving_src/run.py\n", - "\n", - "import os\n", - "import logging\n", - "\n", - "import uvicorn, json, datetime\n", - "from fastapi import FastAPI, Request\n", - "from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification\n", - "\n", - "# 用户指定模型,默认会被加载到当前路径下\n", - "MODEL_PATH = \"/eas/workspace/model/\"\n", - "\n", - "logging.basicConfig(level=logging.INFO)\n", - "logger = logging.getLogger(\"model_server\")\n", - "\n", - "app = FastAPI()\n", - "\n", - "@app.post(\"/\")\n", - "async def predict(request: Request):\n", - " global bert_pipeline\n", - " json_data = await request.json()\n", - " logger.info(\"Input data: %s\", json_data)\n", - " result = bert_pipeline(json_data[\"text\"])\n", - " logger.info(\"Prediction result: %s\", result)\n", - " return result\n", - "\n", - "\n", - "if __name__ == '__main__':\n", - " 
task = os.environ.get(\"HF_TASK\", \"fill-mask\")\n", - " bert_pipeline = pipeline(task=task, model=MODEL_PATH, tokenizer=MODEL_PATH)\n", - "\n", - " uvicorn.run(app, host='0.0.0.0', port=int(os.environ.get(\"LISTENING_PORT\", 8000)))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "77a3568b", - "metadata": {}, - "source": [ - "SDK 提供的 `pai.model.InferenceSpec` 用于描述如何加载模型,以及如何提供预测服务。在以下代码中,我们使用 `pai.model.container_serving_spec` 方法,使用 PAI 提供的推理镜像和本地代码 `serving_src`,创建一个 `InferenceSpec` 对象。对应的本地代码会被上传保存到用户OSS,然后通过挂载的方式将相应的代码准备到运行容器中。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ab6c3828", - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model, container_serving_spec\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "\n", - "# 使用 PAI 提供的 PyTorch CPU 推理镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " framework_version=\"latest\",\n", - " accelerator_type=\"CPU\",\n", - " image_scope=ImageScope.INFERENCE,\n", - ").image_uri\n", - "print(image_uri)\n", - "\n", - "\n", - "# 构建一个使用镜像部署的InferenceSpec,可以用于BERT模型部署为推理服务.\n", - "bert_inference_spec = container_serving_spec(\n", - " # 模型服务的启动命令\n", - " command=\"python run.py\",\n", - " # 模型服务依赖的代码\n", - " source_dir=\"./serving_src\",\n", - " image_uri=image_uri,\n", - " requirements=[\n", - " \"transformers\",\n", - " \"fastapi\",\n", - " \"uvicorn\",\n", - " # 推理 pipeline 使用 device_map=\"auto\" 时需要安装\n", - " \"accelerate\",\n", - " ],\n", - ")\n", - "\n", - "print(bert_inference_spec.to_dict())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "debba5d4", - "metadata": {}, - "source": [ - "### 模型部署\n", - "\n", - "通过构建Model,调用`Model.deploy`方法,可以将模型部署到PAI-EAS,生成在线服务。\n", - "\n", - "关于如何使用SDK部署模型的详细介绍,用户可以参考文档:[PAI Python SDK部署推理服务](https://help.aliyun.com/document_detail/2261532.html)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "60b07fb7", - "metadata": {}, - "outputs": [], - 
"source": [ - "from pai.model import Model\n", - "from pai.common.utils import random_str\n", - "\n", - "m = Model(\n", - " inference_spec=bert_inference_spec,\n", - " model_data=bert_model_uri,\n", - ")\n", - "\n", - "p = m.deploy(\n", - " service_name=\"hf_bert_serving_{}\".format(random_str(6)), # 推理服务名称.\n", - " instance_type=\"ecs.c6.xlarge\", # 服务使用的机器实例规格: 4 vCPU, 8 GB\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5e64254b", - "metadata": {}, - "source": [ - "deploy方法返回的Predictor对象,指向了新创建的推理服务,他提供了`.predict`方法,支持用户向推理服务发送预测请求。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2df66b1f", - "metadata": {}, - "outputs": [], - "source": [ - "res = p.predict(data={\"text\": \"Hello, I'm a [MASK] model.\"})\n", - "\n", - "print(res)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "57f86644", - "metadata": {}, - "source": [ - "在测试完成之后,我们可以通过`predictor.delete_service`删除推理服务,释放资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d78a2587", - "metadata": {}, - "outputs": [], - "source": [ - "# 执行完成之后,删除对应的服务\n", - "\n", - "p.delete_service()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "66b601b9-030e-49c1-8534-b6a53ea7903d", - "metadata": {}, - "source": [ - "## Step3: Finetune BERT预训练模型\n", - "\n", - "[BERT](https://arxiv.org/abs/1810.04805)使用自监督学习方法在大型英文语料库上进行训练,他学习到了英语语言的内在表示,可以通过微调的方式,应用于不同的下游任务,从而获得更好的性能。在当前示例中,我们将使用Huggingface上 Yelp英文评论数据集[yelp_review_full](https://huggingface.co/datasets/yelp_review_full) 对BERT模型进行微调,以获得一个可以用于情感分类的模型。\n", - "\n", - "\n", - "### 准备模型和数据集\n", - "\n", - "在当前步骤中,我们将准备微调训练使用的数据集,然后上传到OSS上供训练作业使用。\n", - "\n", - "> 通过HuggingFace提供的transformers和datasets库可以使用读取本地文件的方式(离线模式),或是从HuggingFace Hub下载模型和数据的方式。为了提高训练作业的执行速度,我们在当前示例中,将模型和数据集准备到OSS,挂载到训练作业执行环境中,供训练作业直接加载使用。\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f760ddcb", - "metadata": {}, - "outputs": [], - "source": [ - 
"from datasets import load_dataset\n", - "from pai.common.oss_utils import upload\n", - "\n", - "data_path = \"./train_data\"\n", - "\n", - "# 从HuggingFace Hub加载数据集\n", - "dataset = load_dataset(\"yelp_review_full\")\n", - "\n", - "# 保存到数据集,保存的数据集可以通过`datasets.load_from_disk`加载使用\n", - "dataset.save_to_disk(data_path)\n", - "\n", - "train_data_uri = upload(\n", - " source_path=data_path,\n", - " oss_path=\"huggingface/dataset/yelp_review_full/\",\n", - " bucket=sess.oss_bucket,\n", - ")\n", - "\n", - "print(train_data_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "5e518592", - "metadata": {}, - "source": [ - "\n", - "### 准备训练代码\n", - "参考HuggingFace提供的对于[Masked Language Model 的微调文档](https://huggingface.co/course/chapter7/3?fw=tf),我们编写了以下训练脚本,它将使用我们上传的数据集完成模型的微调。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a2534222", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# 创建代码保存目录\n", - "!mkdir -p train_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "0f2a7761", - "metadata": {}, - "source": [ - "\n", - "在我们编写的训练作业脚本中,通过环境变量的方式获取训练作业的超参,输出数据,输出模型保存地址。对于PAI训练服务提供的环境变量的详细介绍,可以见文档:[训练作业预置环境变量](https://help.aliyun.com/document_detail/2261505.html)\n", - "\n", - "完整的训练代码如下:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26c62bb8-963e-4ffe-8843-715482896cd3", - "metadata": { - "ExecutionIndicator": { - "show": true - }, - "tags": [] - }, - "outputs": [], - "source": [ - "%%writefile train_src/finetune.py\n", - "\n", - "import os\n", - "\n", - "from datasets import load_dataset, load_from_disk\n", - "from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer, DataCollatorWithPadding, HfArgumentParser\n", - "import numpy as np\n", - "import evaluate\n", - "\n", - "\n", - "def compute_metrics(eval_pred):\n", - " logits, labels = eval_pred\n", - " predictions = np.argmax(logits, axis=-1)\n", - " return 
metric.compute(predictions=predictions, references=labels)\n", - "\n", - "def tokenize_function(examples):\n", - " return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n", - "\n", - "\n", - "def train():\n", - " # 通过环境变量获取预训练模型地址, 训练数据,以及模型保存地址\n", - " model_name_or_path = os.environ.get(\"PAI_INPUT_MODEL\", \"bert-base-cased\")\n", - " input_train_data = os.environ.get(\"PAI_INPUT_TRAIN_DATA\")\n", - " output_dir=os.environ.get(\"PAI_OUTPUT_MODEL\", \"./output\")\n", - "\n", - " # 使用环境变量获取训练作业超参\n", - " num_train_epochs=int(os.environ.get(\"PAI_HPS_EPOCHS\", 2))\n", - " save_strategy=os.environ.get(\"PAI_HPS_SAVE_STRATEGY\", \"epoch\")\n", - "\n", - " print(\"Loading Model...\")\n", - " model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, num_labels=5)\n", - " tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)\n", - "\n", - " print(\"Loading dataset from disk...\")\n", - " dataset = load_from_disk(input_train_data)\n", - " tokenized_datasets = dataset.map(lambda examples: tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True, max_length=512),\n", - " batched=True)\n", - "\n", - " data_collator = DataCollatorWithPadding(tokenizer)\n", - " small_train_dataset = tokenized_datasets['train'].shuffle(seed=42).select(range(1000))\n", - " small_eval_dataset = tokenized_datasets['test'].shuffle(seed=42).select(range(1000))\n", - "\n", - " training_args = TrainingArguments(\n", - " output_dir=output_dir,\n", - " # 使用环境变量获取训练作业超参\n", - " num_train_epochs=num_train_epochs,\n", - " # 使用环境变量获取训练作业保存策略\n", - " save_strategy=save_strategy,\n", - " )\n", - " print(\"TrainingArguments: {}\".format(training_args.to_json_string()))\n", - " metric = evaluate.load('accuracy')\n", - "\n", - " print(\"Training...\")\n", - " trainer = Trainer(\n", - " model=model,\n", - " args=training_args,\n", - " train_dataset=small_train_dataset,\n", - " eval_dataset=small_eval_dataset,\n", - " 
data_collator=data_collator,\n", - " tokenizer=tokenizer,\n", - " compute_metrics=compute_metrics,\n", - " )\n", - "\n", - " trainer.train()\n", - " print(\"Saving Model...\")\n", - " trainer.save_model()\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " train()\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "6a603b63", - "metadata": {}, - "source": [ - "我们的训练作业将使用PAI提供的PyTorch镜像执行,需要在镜像中安装 `transformers` 和 `evaluate` 库才能够执行相应的训练脚本。通过在训练作业目录下提供 `requirements.txt` 文件,PAI的训练服务会自动安装指定的第三方依赖。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cfab739e", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "%%writefile train_src/requirements.txt\n", - "\n", - "transformers\n", - "datasets\n", - "evaluate\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "b478975d-17bd-4f81-93b8-e3dd32b6b7f1", - "metadata": {}, - "source": [ - "### 提交训练作业\n", - "\n", - "通过PAI Python SDK提供的训练作业API`pai.estimator.Estimator`,我们可以将训练脚本提交到PAI执行。在以下代码中,我们将指定使用的训练代码 `train_src` ,使用PAI提供的PyTorch GPU镜像训练,提交运行微调训练作业。对于使用SDK提交训练作业的详细介绍,用户可以参考文档:[PAI Python SDK提交训练作业](https://help.aliyun.com/document_detail/2261505.html)。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dda481a0-7c85-4b49-b3c5-cc30ca5d3a8c", - "metadata": { - "ExecutionIndicator": { - "show": false - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.huggingface.estimator import HuggingFaceEstimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 使用 PAI 提供的 PyTorch GPU 训练镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\", framework_version=\"latest\", accelerator_type=\"GPU\"\n", - ").image_uri\n", - "\n", - "\n", - "# 配置训练作业\n", - "est = HuggingFaceEstimator(\n", - " command=\"python finetune.py\", # 训练作业启动命令\n", - " source_dir=\"./train_src/\", # 训练作业代码\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 训练使用的作业机器类型, 4 vCPU, 15 GB, 1* T4 GPU\n", - " transformers_version=\"latest\",\n", - " 
hyperparameters={ # 训练作业超参,用户可以通过环境变量,或是\n", - " \"save_strategy\": \"epoch\",\n", - " \"epochs\": \"1\",\n", - " },\n", - " base_job_name=\"hf-bert-training\",\n", - ")\n", - "\n", - "\n", - "# est = Estimator(\n", - "# image_uri=image_uri, # 训练作业使用的镜像\n", - "# command=\"python finetune.py\", # 训练作业启动命令\n", - "# source_dir=\"./train_src/\", # 训练作业代码\n", - "# instance_type=\"ecs.gn6i-c4g1.xlarge\", # 训练使用的作业机器类型, 4 vCPU, 15 GB, 1* T4 GPU\n", - "# hyperparameters={ # 训练作业超参,用户可以通过环境变量,或是\n", - "# \"save_strategy\": \"epoch\",\n", - "# \"epochs\": \"1\",\n", - "# },\n", - "# base_job_name=\"hf-bert-training\",\n", - "# )\n", - "\n", - "print(est)\n", - "print(est.hyperparameters)\n", - "\n", - "# 提交训练作业到PAI执行\n", - "# 提交之后SDK会打印作业URL,我们可以作业详情页查看训练日志,输出模型,资源使用情况等\n", - "est.fit(\n", - " # 作业使用的预训练模型和数据集使用inputs方式传递\n", - " # 相应的OSS URI会被挂载到作业环境中,用户可以通过 `PAI_INPUT_{ChannelNameUpperCase}` 的环境变量获取挂载后的路径\n", - " inputs={\n", - " \"model\": bert_model_uri,\n", - " \"train_data\": train_data_uri,\n", - " }\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3c0354a7", - "metadata": {}, - "outputs": [], - "source": [ - "# 训练任务产出的模型地址\n", - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "27e83a0d-d02c-42f3-bbd2-c71c775fad82", - "metadata": { - "tags": [] - }, - "source": [ - "### 部署Finetune获得的模型\n", - "\n", - "我们将复用以上推理服务的代码,将微调训练获得的模型部署到PAI-EAS,创建一个在线推理服务。\n", - "\n", - "> Note: 微调模型用于情感分析任务,我们显式得修改HuggingFace pipeline的Task参数。这里我们通过环境变量的方式传入Task参数。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "73a51721-11b9-4f24-b016-5c704de526b8", - "metadata": { - "ExecutionIndicator": { - "show": false - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.model import Model, container_serving_spec\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "\n", - "# 使用 PAI 提供的 PyTorch CPU 推理镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " 
framework_version=\"latest\",\n", - " accelerator_type=\"CPU\",\n", - " image_scope=ImageScope.INFERENCE,\n", - ").image_uri\n", - "\n", - "\n", - "# 构建一个使用镜像部署的InferenceSpec,可以用于将以上产出的BERT模型部署为推理服务.\n", - "inference_spec = container_serving_spec(\n", - " # 模型服务的启动命令\n", - " command=\"python run.py\",\n", - " # 模型服务依赖的代码\n", - " source_dir=\"./serving_src\",\n", - " image_uri=image_uri,\n", - " requirements=[\n", - " \"transformers\",\n", - " \"fastapi\",\n", - " \"uvicorn\",\n", - " ],\n", - " # 使用情感分析任务pipeline,通过环境变量的方式传递给到推理服务脚本。\n", - " environment_variables={\"HF_TASK\": \"sentiment-analysis\"},\n", - ")\n", - "\n", - "print(inference_spec.to_dict())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d57a41f3-a4fc-40d8-92d5-ca083f4de2ee", - "metadata": { - "ExecutionIndicator": { - "show": false - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.model import Model\n", - "from pai.common.utils import random_str\n", - "\n", - "# 使用训练作业产出的模型\n", - "model_data = est.model_data()\n", - "\n", - "m = Model(\n", - " inference_spec=inference_spec,\n", - " model_data=model_data,\n", - ")\n", - "\n", - "p = m.deploy(\n", - " service_name=\"hf_bert_ft_serving_{}\".format(random_str(6)), # 推理服务名称\n", - " instance_type=\"ecs.c6.xlarge\", # 服务使用的机器实例规格: 4 vCPU, 8 GB\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "82e7586d", - "metadata": {}, - "source": [ - "通过Predictor向新创建的推理服务发送预测请求,获取模型预测结果。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "af0b6e0b", - "metadata": {}, - "outputs": [], - "source": [ - "res = p.predict({\"text\": \"i am so happy today\"})\n", - "print(res)\n", - "\n", - "res = p.predict({\"text\": \"i am so sad today\"})\n", - "print(res)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "bc2bdce3", - "metadata": {}, - "source": [ - "在测试完成之后,我们通过`predictor.delete_service`删除推理服务,释放资源。" - ] - }, - { - "cell_type": "code", - "execution_count": 
null, - "id": "4fdd724e-e557-4461-9cdb-93874a77c49a", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# 执行完成之后,删除对应的服务\n", - "\n", - "p.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - }, - "vscode": { - "interpreter": { - "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/source/tutorial/huggingface_model_deploy/huggingface_model_deploy.ipynb b/docs/source/tutorial/huggingface_model_deploy/huggingface_model_deploy.ipynb deleted file mode 100644 index c04412c..0000000 --- a/docs/source/tutorial/huggingface_model_deploy/huggingface_model_deploy.ipynb +++ /dev/null @@ -1,181 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 部署HuggingFace模型\n", - "\n", - "HuggingFace是一个开源的模型社区,机器学习开发者在社区中可以分享、发现和使用各类机器学习模型。\n", - "\n", - "本文将介绍如何将HuggingFace社区的模型部署到PAI创建模型推理服务。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - 
] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 部署HuggingFace模型\n", - "\n", - "在本示例中,我们将使用HuggingFace社区提供的情感分类模型 [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english)部署一个模型在线服务,他支持将一段英文文本分类为正面或负面情感。\n", - "\n", - "通过相应的[模型的详情页](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/tree/main),我们可以获取部署模型所需的信息,包括模型ID(``MODEL_ID``)、模型任务类型(``TASK``)、模型版本(``REVISION``)。\n", - "\n", - "![](../../images/huggingface-model.png)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过PAI Python SDK提供的``HuggingFaceModel``,我们可以轻松地将HuggingFace社区的模型部署到PAI上。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.huggingface import HuggingFaceModel\n", - "\n", - "\n", - "# 初始化一个HuggingFaceModel\n", - "m = HuggingFaceModel(\n", - " command=\"python app.py\", # 模型服务启动命令\n", - " transformers_version=\"latest\", # 使用的transformers版本, 'latest'表示使用PAI目前支持的最新的版本\n", - " environment_variables={\n", - " \"MODEL_ID\": \"distilbert-base-uncased-finetuned-sst-2-english\", # 部署模型的ID\n", - " \"TASK\": \"text-classification\", # 部署的模型任务类型\n", - " \"REVISION\": \"main\", # 部署模型的版本信息\n", - " },\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.utils import random_str\n", - "\n", - "\n", - "# 部署模型,创建一个模型在线服务\n", - "p = m.deploy(\n", - " service_name=f\"hf_model_deploy_{random_str(n=8)}\", # 模型服务的名称(地域内唯一)\n", - " instance_type=\"ecs.g6.large\", # 
模型服务使用的机器实例规格\n", - " options={\n", - " \"enable_webservice\": True, # 以AIWeb应用的模式启动,支持用户在Web浏览器上使用模型在线服务\n", - " },\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.predict(data={\"data\": [\"I love you\"]})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "测试完成之后,删除服务,释放机器资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/model_deploy_container/model_deploy_container.ipynb b/docs/source/tutorial/model_deploy_container/model_deploy_container.ipynb deleted file mode 100644 index 229bc2b..0000000 --- a/docs/source/tutorial/model_deploy_container/model_deploy_container.ipynb +++ /dev/null @@ -1,376 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用镜像部署模型\n", - "\n", - "PAI支持用户使用镜像的方式部署模型,通过镜像,开发者可以自定义模型部署的环境,包括Python、使用的机器学习框架、依赖的第三方库等,能够支持用户灵活的部署需求。详细的介绍可以参考PAI帮助文档:[使用镜像部署模型](https://help.aliyun.com/zh/pai/user-guide/deploy-a-model-service-by-using-a-custom-image)。\n", - "\n", - "PAI Python SDK提供了便利的API,支持用户能够使用自定义镜像,或是PAI提供的预置推理,将一个本地,或是OSS上的模型快捷得部署为模型在线服务。\n", - "\n", - "本文档将介绍,用户如何通过PAI Python SDK通过自定义镜像的方式部署模型。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip 
install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 部署模型推理服务\n", - "\n", - "模型在线服务包含了模型的文件、模型的推理服务代码、以及推理服务运行环境。\n", - "本示例将使用一个简单的`PyTorch`模型,通过`Flask`和`PAI`提供的`PyTorch`基础镜像,部署模型在线服务。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "下载示例使用的简单PyTorch模型。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 下载模型到本地 \"model\" 目录\n", - "\n", - "!mkdir -p model/\n", - "!wget https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/resources/toy_model.pt -P model/" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备推理服务代码\n", - "\n", - "在部署模型之前,我们首先需要准备推理服务的代码,它提供HTTP接口,负责接收预测请求,使用模型进行推理,返回预测结果。\n", - "\n", - "当前示例我们将使用 ``Flask`` 编写一个简单的推理服务,保存为 ``infer_src/app.py`` 文件。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p infer_src" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile infer_src/app.py\n", - "import json\n", - "from flask import Flask, request\n", - "import os\n", - "import torch\n", - "import numpy as np\n", - "\n", - 
"app = Flask(__name__)\n", - "model = None\n", - "# 默认的模型文件路径\n", - "MODEL_PATH = \"/eas/workspace/model/\"\n", - "\n", - "def load_model():\n", - " \"\"\"加载模型\"\"\"\n", - " global model\n", - " model = torch.jit.load(os.path.join(MODEL_PATH, \"toy_model.pt\"))\n", - " model.eval()\n", - "\n", - "@app.route(\"/\", methods=[\"POST\"])\n", - "def predict():\n", - " data = np.asarray(json.loads(request.data)).astype(np.float32)\n", - " output_tensor = model(torch.from_numpy(data))\n", - " pred_res = output_tensor.detach().cpu().numpy()\n", - " return json.dumps(pred_res.tolist())\n", - "\n", - "if __name__ == \"__main__\":\n", - " load_model()\n", - " app.run(host=\"0.0.0.0\", port=int(os.environ.get(\"LISTENING_PORT\", 8000)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 获取PAI提供的预置推理镜像\n", - "\n", - "PAI提供了一系列预置的推理镜像,镜像内预置了机器学习框架、常用的第三方库、Python、NVIDIA CUDA库等。我们可以通过以下代码列出所有的预置镜像。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.image import list_images, ImageScope\n", - "\n", - "\n", - "data = [\n", - " [\n", - " \"ImageUri\",\n", - " \"FrameworkName\",\n", - " \"FrameworkVersion\",\n", - " \"AcceleratorType\",\n", - " \"PythonVersion\",\n", - " ]\n", - "]\n", - "\n", - "# 列出常用的PyTorch推理镜像\n", - "for img in list_images(framework_name=\"PyTorch\", image_scope=ImageScope.INFERENCE):\n", - " data.append(\n", - " [\n", - " img.image_uri,\n", - " img.framework_name,\n", - " img.framework_version,\n", - " img.accelerator_type,\n", - " img.python_version,\n", - " ]\n", - " )\n", - "\n", - "# 列出常用的TensorFlow推理镜像\n", - "for img in list_images(framework_name=\"TensorFlow\", image_scope=ImageScope.INFERENCE):\n", - " data.append(\n", - " [\n", - " img.image_uri,\n", - " img.framework_name,\n", - " img.framework_version,\n", - " img.accelerator_type,\n", - " img.python_version,\n", - " ]\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "from IPython.display import HTML, display\n", - "\n", - "display(\n", - " HTML(\n", - " \"{}
\".format(\n", - " \"\".join(\n", - " \"{}\".format(\"\".join(str(_) for _ in row))\n", - " for row in data\n", - " )\n", - " )\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过SDK提供的 `pai.image.retrieve` API,可以获取指定框架版本的镜像。在当前示例中,我们将使用PAI提供的PyTorch 1.12版本的CPU推理镜像" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.image import retrieve, ImageScope\n", - "\n", - "# # 获取PyTorch 1.10 GPU推理镜像\n", - "# print(retrieve(\n", - "# framework_name=\"PyTorch\", # 框架名称\n", - "# framework_version=\"latest\", # 框架版本\n", - "# accelerator_type=\"gpu\", # 选择支持Nvidia CUDA GPU的镜像\n", - "# image_scope=ImageScope.INFERENCE, # 镜像类型,推理镜像\n", - "\n", - "# # ).image_uri)\n", - "\n", - "# 获取最新的PyTorch CPU推理镜像\n", - "torch_image_uri = retrieve(\n", - " framework_name=\"PyTorch\", # 框架名称\n", - " framework_version=\"1.12\", # 框架版本,latest表示使用PAI支持的最新版本\n", - " # accelerator_type=\"cpu\", # 默认使用CPU镜像\n", - " image_scope=ImageScope.INFERENCE, # 镜像类型,推理镜像\n", - ").image_uri\n", - "print(torch_image_uri)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 部署推理服务\n", - "使用以上的推理服务代码,以及PyTorch推理镜像,我们将一个PyTorch模型部署为模型在线服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model, container_serving_spec\n", - "\n", - "\n", - "m = Model(\n", - " model_data=\"./model/\", # 模型文件,可以是一个本地文件或是OSS Bucket路径(例如 oss:///path/to/model ),\n", - " inference_spec=container_serving_spec(\n", - " image_uri=torch_image_uri, # 推理服务使用的镜像\n", - " command=\"python app.py\", # 模型推理服务启动命令\n", - " source_dir=\"./infer_src/\", # 推理服务代码所在目录\n", - " requirements=[\"flask==2.0.0\", \"Werkzeug==2.3.4\"], # 推理服务依赖的Python包\n", - " ),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.utils import random_str\n", - 
"\n", - "# 部署模型服务\n", - "p = m.deploy(\n", - " service_name=f\"toy_model_{random_str(6)}\", # 模型服务名称, 地域内唯一\n", - " instance_type=\"ecs.c6.large\", # 模型服务使用的机器实例规格\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 调用推理服务\n", - "\n", - "部署服务后返回的`pai.predictor.Predictor`对象可以用于调用推理服务,发送预测请求。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "# 构造一个随机数组输入\n", - "dummy_input = np.random.rand(1, 10, 10).tolist()\n", - "print(dummy_input)\n", - "\n", - "result = p.raw_predict(\n", - " data=dummy_input,\n", - ")\n", - "\n", - "# 打印推理结果\n", - "print(result.json())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在测试完成之后,删除推理服务" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/modelscope_model_deploy/modelscope_model_deploy.ipynb b/docs/source/tutorial/modelscope_model_deploy/modelscope_model_deploy.ipynb deleted file mode 100644 index 51c7c5b..0000000 --- a/docs/source/tutorial/modelscope_model_deploy/modelscope_model_deploy.ipynb +++ /dev/null @@ -1,216 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "tags": [ - "keep-output" - ] - }, - "source": [ - "# 部署ModelScope模型\n", - "\n", - "[ModelScope](https://www.modelscope.cn/)是一个开源的模型社区,提供了丰富的自然语言处理、计算机视觉、多模态等领域开源模型,并提供了[ModelScope 
library](https://github.com/modelscope/modelscope),支持开发者可以方便得获取模型,使用模型进行推理。\n", - "\n", - "PAI支持开发者将ModelScope上的模型,简单快捷得部署为在线推理服务,本文将介绍使用PAI Python SDK完成ModelScope模型的部署。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 部署ModelScope模型\n", - "\n", - "当前示例,我们将使用ModelScope上的[\"CSANMT连续语义增强机器翻译-英中-通用领域-large\"](https://modelscope.cn/models/damo/nlp_csanmt_translation_en2zh/summary)模型,他支持英文到中文的翻译任务。\n", - "\n", - "通过ModelScope的模型详情页,我们可以获取部署模型所需要的信息,包括**模型ID**,**模型版本**,以及**任务类型**,然后通过 `pai.modelscope.ModelScopeModel` 类,创建一个ModelScope模型对象,完成模型部署。\n", - "\n", - "![](../../images/modelscope-model.png)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.modelscope.model import ModelScopeModel\n", - "\n", - "# 配置待部署的模型信息\n", - "m = ModelScopeModel(\n", - " command=\"python app.py\", # 默认的ModelScope模型推理服务启动命令\n", - " 
modelscope_version=\"latest\", # ModelScope library的版本号,latest表示最新版本\n", - " environment_variables={\n", - " \"MODEL_ID\": \"damo/nlp_csanmt_translation_en2zh\", # ModelScope的模型ID\n", - " \"TASK\": \"translation\", # 模型的任务类型\n", - " \"REVISION\": \"v1.0.1\", # 模型的版本号\n", - " },\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.utils import random_str\n", - "from pai.predictor import Predictor\n", - "\n", - "# 部署模型,在PAI-EAS创建一个推理服务\n", - "p: Predictor = m.deploy(\n", - " service_name=\"ms_model_{0}\".format(random_str(8)), # 配置推理服务名称\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 配置推理服务实例规格\n", - " options={\n", - " \"metadata.rpc.keepalive\": 20000, # 配置推理服务RPC超时时间: 20s\n", - " },\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "通过以上方式部署的模型推理服务,支持通过空字符串的预测请求,获取模型的输入输出信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pprint import pprint\n", - "from pai.predictor import RawResponse\n", - "\n", - "# 通过一个空的预测请求,获取模型的推理输入输出的数据格式\n", - "res: RawResponse = p.raw_predict(data=\"\")\n", - "\n", - "pprint(res.json())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "基于以上获得的输入数据格式信息,我们可以构建相应的预测请求,发送给到推理,获取翻译结果。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "res = p.predict(\n", - " # 参考以上的获得的输入输出数据格式,配置推理请求的数据\n", - " data={\n", - " \"input\": {\n", - " \"text\": \"Alibaba Group's mission is to let the world have no difficult business\"\n", - " }\n", - " }\n", - ")\n", - "pprint(res)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在测试完成之后,删除推理服务,释放机器资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 删除推理服务\n", - "p.delete_service()" - ] - } - ], - "metadata": { - 
"kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/modelscope_vit/modelscope_vit.ipynb b/docs/source/tutorial/modelscope_vit/modelscope_vit.ipynb deleted file mode 100644 index 106f994..0000000 --- a/docs/source/tutorial/modelscope_vit/modelscope_vit.ipynb +++ /dev/null @@ -1,638 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用ModelScope ViT模型完成图像分类模型微调和部署\n", - "\n", - "## 背景介绍\n", - "\n", - "\n", - "[ModelScope](https://www.modelscope.cn)是一个旨在为泛AI开发者提供灵活、易用、低成本的一站式“模型即服务”(MaaS)的开源平台。它汇集了丰富的预训练模型,覆盖了NLP、CV、Audio、AIGC、多模态大模型等多个领域。利用ModelScope所提供的模型以及ModelScope Library,开发者可以用一行代码实现模型推理,或者用十几行代码实现对预训练模型的调优训练,方便开发者基于行业数据集快速构建专属行业模型。\n", - "\n", - "当前示例中,我们以[ViT图像分类-通用](https://modelscope.cn/models/damo/cv_vit-base_image-classification_ImageNet-labels/summary) 为示例,展示如何在PAI完成一个ModelScope模型的微调训练,然后将获得的模型部署为一个在线推理服务的过程。主要流程包括:\n", - "\n", - "1. 准备工作:\n", - "\n", - "安装PAI Python SDK,并完成SDK配置。\n", - "\n", - "2. 模型的微调训练\n", - "\n", - "编写微调训练脚本,使用[花朵分类](https://www.modelscope.cn/models/zydfx1111/flower)数据集对模型进行微调训练,以获得一个可以用于花朵分类的模型。\n", - "\n", - "3. 
部署推理服务\n", - "\n", - "将微调训练作业输出的模型,部署到PAI-EAS,创建一个在线推理服务。\n", - "\n", - "## 前提条件\n", - "\n", - "- 已获取阿里云账号的鉴权AccessKey ID和AccessKey Secret,详情请参见:[获取AccessKey](https://help.aliyun.com/document_detail/116401.html)。\n", - "- 已创建或是加入一个PAI AI工作空间,详情请参见:[创建工作空间](https://help.aliyun.com/document_detail/326193.html)。\n", - "- 已创建OSS Bucket,详情请参见:[控制台创建存储空间](https://help.aliyun.com/document_detail/31885.html)。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step1: 准备工作\n", - "\n", - "我们将使用PAI提供的Python SDK,提交训练作业,部署模型。可以通过以下命令安装PAI Python SDK。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI Python SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step2: 提交微调训练作业\n", - "\n", - "ModelScope的[ViT图片分类-通用](https://modelscope.cn/models/damo/cv_vit-base_image-classification_ImageNet-labels/summary)模型使用经典的[ViT Base](https://github.com/google-research/vision_transformer)模型结构,在ImageNet-1k数据集进行预训练,可以直接用于[ImageNet 1k标签](https://deeplearning.cms.waikato.ac.nz/user-guide/class-maps/IMAGENET/)覆盖图像的分类任务,也可以作为下游任务的预训练模型。\n", - "\n", - 
"当前示例,我们将以[花朵分类数据集](https://www.modelscope.cn/datasets/tany0699/flowers14/summary)对模型进行微调训练,从而获得一个可以用于花朵分类的模型。\n", - "\n", - "### 准备微调训练脚本\n", - "\n", - "ModelScope提供了功能完善的Python Library,能够支持用户方便得使用ModelScope模型进行推理以及微调训练,在本示例中,我们将使用ModelScope Library编写相应的微调训练脚本,然后提交到PAI执行微调训练作业。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "# 准备相应训练作业脚本目录\n", - "!mkdir -p train_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "完整的微调训练脚本代码如下:\n", - "\n", - "> 对于ModelScope library的使用介绍,请参见:[ModelScope文档](https://www.modelscope.cn/docs/ModelScope%20Library%E6%A6%82%E8%A7%88%E4%BB%8B%E7%BB%8D)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_src/finetune.py\n", - "\n", - "import os\n", - "import re\n", - "import logging\n", - "import shutil\n", - "\n", - "\n", - "from modelscope.msdatasets import MsDataset\n", - "from modelscope.metainfo import Trainers\n", - "from modelscope.trainers import build_trainer\n", - "\n", - "\n", - "# 从环境变量中获取超参(由PAI的训练服务注入)\n", - "BATCH_SIZE = int(os.environ.get(\"PAI_HPS_BATCH_SIZE\", 16))\n", - "LEARNING_RATE = float(os.environ.get(\"PAI_HPS_INITIAL_LEARNING_RATE\", 1e-3))\n", - "NUM_EPOCHS = int(os.environ.get(\"PAI_HPS_EPOCHS\", 1))\n", - "NUM_CLASSES = int(os.environ.get(\"PAI_HPS_NUM_CLASSES\", 14))\n", - "MODEL_ID_OR_PATH = os.environ.get(\"PAI_INPUT_MODEL\", \"damo/cv_vit-base_image-classification_ImageNet-labels\")\n", - "\n", - "# 通过环境变量获取输出模型,和checkpoints保存路径\n", - "OUTPUT_MODEL_DIR = os.environ.get(\"PAI_OUTPUT_MODEL\", \"./model/\")\n", - "WORK_DIR = os.environ.get(\"PAI_OUTPUT_CHECKPOINTS\", \"./checkpoints/\")\n", - "\n", - "\n", - "# 将产出的模型保存到模型输出目录(OUTPUT_MODEL_DIR)\n", - "def save_model():\n", - " best_ckpt_pattern = re.compile(\n", - " pattern=r\"^best_accuracy_top-1_epoch_\\d+.pth$\"\n", - " )\n", - " print(\"Saving best 
checkpoint as pytorch_model.pt\")\n", - " print(\"List work dir: \", os.listdir(WORK_DIR))\n", - "\n", - " f_name = next((f for f in os.listdir(WORK_DIR) if best_ckpt_pattern.match(f)), None)\n", - " if f_name:\n", - " # 使用最佳checkpoints作为输出模型\n", - " print(\"Found best checkpoint: \", f_name)\n", - " shutil.copyfile(\n", - " src=os.path.join(WORK_DIR, f_name),\n", - " dst=os.path.join(OUTPUT_MODEL_DIR, \"pytorch_model.pt\"),\n", - " )\n", - " os.remove(os.path.join(WORK_DIR, f_name))\n", - " else:\n", - " # 如果没有,则使用最后一个epoch的checkpoints作为输出模型\n", - " print(\"Not found best checkpoint.\")\n", - " last_ckpt_file = \"epoch_{}.pth\".format(NUM_EPOCHS)\n", - " if os.path.isfile(os.path.join(WORK_DIR, last_ckpt_file)):\n", - " shutil.copyfile(\n", - " src=os.path.join(WORK_DIR, last_ckpt_file),\n", - " dst=os.path.join(OUTPUT_MODEL_DIR, \"pytorch_model.pt\"),\n", - " )\n", - " else:\n", - " print(\"Not found latest checkpoint: {}.\".format(os.path.join(WORK_DIR, last_ckpt_file)))\n", - " # 模型配置信息\n", - " shutil.copyfile(\n", - " src=os.path.join(WORK_DIR, \"configuration.json\"),\n", - " dst=os.path.join(OUTPUT_MODEL_DIR, \"configuration.json\"),\n", - " )\n", - "\n", - "\n", - "# 修改配置文件\n", - "def cfg_modify_fn(cfg):\n", - " cfg.train.dataloader.batch_size_per_gpu = BATCH_SIZE # batch大小\n", - " cfg.train.dataloader.workers_per_gpu = 8 # 每个gpu的worker数目\n", - " cfg.train.max_epochs = NUM_EPOCHS # 最大训练epoch数\n", - " cfg.model.mm_model.head.num_classes = NUM_CLASSES # 分类数\n", - " cfg.model.mm_model.train_cfg.augments[0].num_classes = NUM_CLASSES # 分类数\n", - " cfg.model.mm_model.train_cfg.augments[1].num_classes = NUM_CLASSES # 分类数\n", - " cfg.train.optimizer.lr = LEARNING_RATE # 学习率\n", - " cfg.train.lr_config.warmup_iters = 1 # 预热次数\n", - "\n", - " # Note: OSS挂载到输出路径中,不支持软链接.\n", - " cfg.train.checkpoint_config.create_symlink = False\n", - "\n", - "\n", - " return cfg\n", - "\n", - "def train():\n", - " ms_train_dataset = MsDataset.load(\n", - " 'flowers14', 
namespace='tany0699',\n", - " subset_name='default', split='train') # 加载训练集\n", - "\n", - " ms_val_dataset = MsDataset.load(\n", - " 'flowers14', namespace='tany0699',\n", - " subset_name='default', split='validation') # 加载验证集\n", - "\n", - "\n", - " # 构建训练器\n", - " kwargs = dict(\n", - " model=MODEL_ID_OR_PATH, # 模型id\n", - " work_dir=WORK_DIR,\n", - " train_dataset=ms_train_dataset, # 训练集 \n", - " eval_dataset=ms_val_dataset, # 验证集\n", - " cfg_modify_fn=cfg_modify_fn # 用于修改训练配置文件的回调函数\n", - " )\n", - " trainer = build_trainer(name=Trainers.image_classification, default_args=kwargs)\n", - "\n", - " # 进行训练\n", - " trainer.train()\n", - "\n", - " # 进行评估\n", - " result = trainer.evaluate()\n", - " print('Evaluation Result:', result)\n", - "\n", - " # 保存模型\n", - " save_model()\n", - "\n", - "if __name__ == \"__main__\":\n", - " train()\n", - " " - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在当前的训练作业中,我们将使用PAI提供的PyTorch训练镜像,需要在镜像中安装ModelScope Library。通过在训练作业脚本目录下准备一个`requirements.txt`文件,可以在训练作业启动时,自动安装依赖的第三方库。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_src/requirements.txt\n", - "\n", - "\n", - "# 部分ModelScope依赖library由ModelScope Host,需要显式配置以下参数\n", - "--find-links https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html\n", - "modelscope[cv]==1.3.1" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "完整的训练作业脚本目录结构如下:\n", - "\n", - "```shell\n", - "\n", - "train_src\n", - " ├── finetune.py\n", - " └── requirements.txt\n", - "\n", - "```\n", - "\n", - "后续我们将通过PAI Python SDK将训练脚本提交到PAI执行。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 提交训练作业到PAI\n", - "\n", - "SDK提供了High-Level的API,`pai.estimator.Estimator`,支持用户方便地使用镜像配合训练脚本,提交训练作业到PAI。以下代码中,我们将使用以上的训练作业脚本(`train_src`目录),配合PAI提供的PyTorch训练镜像,提交一个训练作业。\n", - "\n", - 
"对于如何使用SDK提交训练作业的详细介绍,可以见文档:[提交训练作业](https://help.aliyun.com/document_detail/2261505.html)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 使用PAI提供的最新的PyTorch GPU镜像\n", - "torch_img_uri = retrieve(\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"gpu\",\n", - ").image_uri\n", - "\n", - "# 使用训练配置信息,创建Estimator对象\n", - "est = Estimator(\n", - " command=\"python finetune.py\", # 训练作业的启动命令\n", - " source_dir=\"train_src\", # 训练作业脚本本地目录(绝对路径,或是相对路径)\n", - " image_uri=torch_img_uri, # 作业的镜像类型\n", - " # instance_type=\"ecs.gn6e-c12g1.3xlarge\", # 12vCPU 92GiB NVIDIA V100 × 1 (32GB GPU memory)\n", - " instance_type=\"ecs.gn7i-c8g1.2xlarge\", # 8vCPU 30GiB NVIDIA A10 × 1 (24GB GPU Memory)\n", - " base_job_name=\"vit-finetune\", # 作业名称\n", - " hyperparameters={ # 训练作业超参,用户可以通过环境变量或是读取配置文件的方式获取.\n", - " \"batch_size\": 128,\n", - " \"initial_learning_rate\": 1e-4,\n", - " \"epochs\": 2,\n", - " # 花朵数据集一共14个分类\n", - " \"num_classes\": 14,\n", - " },\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "通过`fit` API提交训练作业。当前示例中,我们在训练脚本中使用ModelScope的library去下载数据集。当用户需要使用自定义数据集时,可以通过`fit`方法传递相应数据OSS路径,训练作业会通过挂载的方式将相应的数据准备的执行环境中。\n", - "\n", - "```python\n", - "\n", - "est.fit(\n", - "\t# 用户的训练作业脚本可以通过环境变量 PAI_INPUT_{ChannelNameUpperCase} 获得数据的本地路径.\n", - "\t\"train\": \"oss:///train/data/path/\",\n", - ")\n", - "\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "est.fit()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练作业执行成功之后,用户可以通过`estimator.model_data()`获取相应产出模型的OSS路径" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step3: 部署推理服务\n", - "\n", - 
"PAI-EAS是PAI提供的推理服务部署平台,支持使用Processor或是镜像的方式部署推理服务。在以下的流程中,我们将使用微调获得的模型,使用镜像部署的方式部署一个在线推理服务。\n", - "\n", - "### 准备推理服务使用的代码\n", - "\n", - "镜像部署的模式,要求用户提供一个推理服务程序,他负责加载模型,提供HTTP API,以支持接受用户推理请求,调用模型处理推理请求,返回推理结果。在当前示例中,我们将使用[FastAPI](https://fastapi.tiangolo.com/)编写一个推理服务程序,加载以上训练作业输出的模型,在PAI创建一个推理服务。\n", - "\n", - "我们首先创建一个目录(`serve_src`),用于保存的推理服务程序代码。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p serve_src/" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们准备的推理服务程序,支持用户通过HTTP POST发送的图片,然后调用ModelScope的推理pipeline获取预测结果,返回给到用户。\n", - "\n", - "> ModelScope 推理pipeline返回的结果中带有`numpy.ndarray`数据,需要我们通过自定义Encoder将其序列化。\n", - "\n", - "完整代码如下,我们将其保存到`serve_src`目录下,用于后续创建推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile serve_src/run.py\n", - "\n", - "import os\n", - "import io\n", - "import json\n", - "\n", - "import uvicorn\n", - "from fastapi import FastAPI, Response, Request\n", - "import numpy as np\n", - "\n", - "from modelscope.pipelines import pipeline\n", - "from modelscope.utils.constant import Tasks\n", - "from PIL import Image\n", - "\n", - "# 用户指定模型,默认会被加载到当前路径下。 \n", - "MODEL_PATH = \"/eas/workspace/model/\"\n", - "\n", - "class NumpyEncoder(json.JSONEncoder):\n", - "\n", - " def default(self, obj):\n", - " if isinstance(obj, np.ndarray):\n", - " return obj.tolist()\n", - " elif isinstance(obj, np.generic):\n", - " return obj.item()\n", - " else:\n", - " return json.JSONEncoder.default(self, obj)\n", - "\n", - "app = FastAPI()\n", - "\n", - "@app.post(\"/\")\n", - "async def predict(request: Request):\n", - " global p\n", - " content = await request.body()\n", - " img = Image.open(io.BytesIO(content))\n", - " res = p(img)\n", - " return Response(content=json.dumps(res, cls=NumpyEncoder), media_type=\"application/json\")\n", - "\n", - "\n", - "if 
__name__ == '__main__':\n", - " p = pipeline(\n", - " Tasks.image_classification,\n", - " model=MODEL_PATH,\n", - " )\n", - " uvicorn.run(app, host='0.0.0.0', port=8000)\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 创建推理服务\n", - "\n", - "我们将使用PAI提供的ModelScope推理镜像,使用以上的推理服务程序,创建一个推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import container_serving_spec\n", - "from pai.session import get_default_session\n", - "from pai.model import Model\n", - "from random import randint\n", - "\n", - "\n", - "# 使用PAI QuickStart提供的ModelScope的镜像创建推理服务\n", - "image_uri = (\n", - " \"registry.{}.aliyuncs.com/paiflow-public/quickstart:modelscope-1.2.0\".format(\n", - " get_default_session().region_id,\n", - " )\n", - ")\n", - "\n", - "\n", - "# 创建一个Model对象,他可以用于创建推理服务\n", - "m: Model = Model(\n", - " # 使用以上训练作业产出的模型\n", - " model_data=est.model_data(),\n", - " # 配置模型的推理配置,包括使用的镜像,使用的推理服务脚本,推理的依赖包等。\n", - " inference_spec=container_serving_spec(\n", - " source_dir=\"./serve_src/\",\n", - " command=\"python run.py\",\n", - " image_uri=image_uri,\n", - " requirements=[\n", - " \"fastapi\",\n", - " \"uvicorn\",\n", - " ],\n", - " ),\n", - ")\n", - "\n", - "\n", - "print(m.inference_spec.to_dict())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "指定推理服务的名称,以及使用的机器实例规格,创建一个推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import Predictor\n", - "\n", - "\n", - "p: Predictor = m.deploy(\n", - " service_name=\"modelscope_vit_{}\".format(randint(0, 100000)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - " options={\n", - " # 推理镜像较大的镜像下,需要配置额外的磁盘空间\n", - " \"features.eas.aliyun.com/extra-ephemeral-storage\": \"40GB\"\n", - " },\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": 
{}, - "source": [ - "`model.deploy` 返回的`Predictor`对象可以用于向相应的推理服务发送请求,获得推理结果。\n", - "\n", - "\n", - "这里我们使用已经准备的一张花朵图片测试推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAIAAADTED8xAAEAAElEQVR4nJz9SbMsy5oYCn2Nu0dE5mp2d84+Td2+SqWyMoRUpSc9wMCAESOYMMWMGQOmTPgfjPkhPAMmGAjZk6okIanqNnXvPeeefu+9usyMCPevYeARkZG51j733nLbtnZmZDQe7l/f4v/lv/u/AgAAIOLyd/2hfl6GowGAu4O5u5uZmaF7jJGZIwciQkSaR1KrF9arzKz+3W639QQAMDNVrc+Kgc3MwOtXBRVVVVVQd3cEZFItOWdDYGaQyMzMjIg+DwBYJhBCWE5ARBhK/RDmOSIiIaoqES3zt3mEeR2YOYSwvEgpZXn95XGIaAh1Al6vo7poqKrTxOZVdXd0aAQMfATLYIVxp/nLN99+8d03n33zTVEpIkShTU3btk1MzByDiAhR2Gw2bdOFELpmc9F2beBg0HG4DDEh+DiCFALcYwOr4TTto8FxrRyxLhERIYGZiYiqglrdSkRknK7k060EFUQERER293pbdZg/oKqKaSmlfgCi5fXP/i6Qtv4QmZdzlk1x97pfdW7Lpri7AamqiNQtY+blcXUO9TgAlFJKKQHeM5Y5rdHA3QFn9EBwd5yf/3jqZ/dZUKiCy9M3f89ARARcFmLZOvdpQ+tCLL8uz5o29RSZ4RS961UTep/ux/rX5cMCN+sbfv8gAAeA+cwK+hXHDdwQ6jdVHYbhfr+7v78fhkFMRZXICJAAwTwQCRczQBQ3GPqRKDTx0Ddtw+GD6+uL1BRTNGtiCCG6FDRc3miG1EcrsPpcidFC15ZrHdzdicjnVzh7x7Mj01PmbSciMyMiIFrOXK/2GfCcQdETS3q6rUegOL1o2btlDutHENF7EeDs+rPP6ADzHQPRGuZOVsHPX4+I1nT68ZI9HvN9EbHebxrmTkSMxEiBuCLV+oVxZkQ44eu0OE/iwPqVlwV93/H1U04miQhwCgf1oTPVQAeACu5ePwu4ARi4uA1Sdvv9/f397f2u73t1c3dCIwMwNxEiCrEyQipZzMDMIqcuNZGw5XjVbdnBHAOniDCK0rw769WEeUrTq80vaGZqE8c1MzSvq0pE5vZ4m+q2rJfIl5WrD/LjyhCRO8AMALDCmQV+1ov55B6d/bSmejNU+fvoVJ3D+sj3IcAZmh4f6SerQEQ4AwStZKWJaD2a+uOZrd/qibeFJ9Zi/aqBOXAINAk5J1N9jJN+PPr4Wev3fZJKPcaNsye+b6ADwQpYEM3NEdBcIhXVUS2b9nncD30/DJU7O1aRxQoUAkA1IkIgJALEnGUs6qqEkjmT20Wz3cbmxcVFmxonLiIGE8U5vtH87g7HF3SAKlcgYpHi7hUBaL6ikhtEXCjo2YvbxI/95Lbu7jhJShXgkCqkV7kRT9n+Y2p7Bij1yBnowyMEWN+wsp31g87A770IcIbo53g//+UZoBcZcaJzeH4TeA+4rN8KHuPb6YBTpomIKQRmjiEsCLCgGTwF1itW9cRLnS2Tu6/lluUmZ5z05A447cBEKeZ/BMiOy1MQ0NzNvSCNbqOWQUqfx7FkA6wKFfhEQwjAtd4NTYEQAVAM3NGAwHzQIsPwWfkdFm1/9tPr7YUCmDuFSHCCAIbzhE8hb3nxqqtMItCZPDFjy5NQcQp/E9yrgZmZ23pbF+D+/t1fHz+7cP3o+qAzgoWnCiHOOlh9tWW2iPh
HIMD81CNghRV7PUp7gIAnYPGYZpxR2RN4fcS2Tm4C6DC9PxEycyAOHBbldTm5vudxexawO92281f2E6KyXtM1AvgjEeh9g3xCAICJQsC8gvXug0sv+ZCHUcqhjH0RdecYmtRNTzQDQzQHBnRQdUdHcDNAJGYyBZHSD8Phbgdqz6+vL7cXmxACcohRRZYJuzvRvAsr4MYZjGCN+Wdi5IoUr0FzvZsLAvjxPucMEycJcKZTAAuPOtvrx8u7HDzbmvUJbsdnneHGMtaw8UcjQL0bnYo0OIt6tCKrOOu7OCup+JT88+jdnvjp8UIQETAxcyIOxAGJiY8nAOqpqObLriwrOz/ojKLAUxre+vgaPp48YRnkE6At5z2ez0Me+rHv8yimfZGx5KLijjFGd3dVMTRXBwdzIAAgMFQ3ByAMzFxci5aibmY39/e/+/Lri6Z7/erlZdfaY6VrkV2RlndxM1jhwHrZF2Mawbx3p8SLjrQWfKWDzm96AtmOJ8C3Bug19K8/nBGykzVcjeOtVBdIg1Mii3gUCJcjfzQC4CNVchF41tBPPhO/p7D5fRD2/XM4mwwgViW46htVXJhAcyWBzI8E8AqNv4d4rxfOVyLQ+yaPp7LTdHD+hvNqQIWVukkAAGAIhrCX3GseVdUtm44qpWgWYWYzMwUEQ3PDam9FntQ2AycOzDGJjQpOHDmhmr25eff11cXF5abrOjAJp5Kez3PmGRTMjE6JOqw3d4ab9Rs9uQLL4qyXdy1oLgiwBsHlw9kTl1/PEGYhPY8p6QL3awxZ/i7Qv74/EYWFQixybRWmF6PK2RSrDXWFDFhN3YvKSw5EtGgEYJOBf7n/YvuvN190lMWmviyZmQFN1tzqB1CxxU5HfNTo692qqd7dqxn48agkalmjRWqieVa4Ytl1MqWU5d39kZa2XqX6VV0rlAQkrjYHtRWVQnMXtwLWl3EYhhG1mBc3MRVTnW3yIYRSVDQjInIIRMyhPkVEDLBCPyIicEwtMzccUEUB7g/9Qz9cXQmFhvyEUvoCRj69xdl4kujA2nI9W+KnV65kKIRS1N2B0N2z6Gywx3qOu6uquj22gNchInhqzl48J4/hcAHX9R3qfjHzsoMVVs2slDLt74wGiybwB3EAWKOm64RAfs4EEJFmbgBHGvCIcp9+Xm8DPFKSFlBbcAPmHa24CivkhEdI/763ezyxs+NHUMDzUU9bDKxwShGZGcwBgACJiByMJuR3QgAwRjMYs+zHYTcc+mBZSjZR1SzmCBQ4QkInZhAm4oBgSIRMyEdxeZoMIQUmZUAjYiYEN0UobgKuCKZPy8FwCv3vW6JFAprM0Kcscf11vWXfN+yc/E/rWbmFT9p5NRs4TA61kyc+muTj4+uDC7FbnLALgJ0gwJN07jH4kk/G3eMazQoVzWgA1QE60fKn5Y0FgNazcffKNNfGrPWb1zOrn5JiwFnpwbNdOUW/U8ptZy9FRLicf2piqhc8QoGJE9bPZ0sfiB29in+hro1P7l9xt/oB/CD5Xb97eHjYbYJI5WtQDf/MTBRUld1DiGYzq2EGXETYyk4diTEYO0MxDsxIrGagWcZRJZGtFcEKr2cL/hgHHq9PFTXh1BIwr+fJNq0XZKFWsFA9OFG9zi6ERwNPjSJPUrezrwjnoF8/hBCqJ3htgTWz8D7U+b3zm1ZnFlhodTo9JU2e/V178tbLiis1BVZ7tjzR3auZHJiapqn+pLPlOHvWekUWhFwPekRFjms3s87jyUQwI8B6oeoHQiQHnzWNBbwMoZgagBGPJrsy3B52d4eHHjfT4xBsUhkZEdzdiJGUmQ0RgB3JmbGM5gZADgoAiF7DPRyUQwhcgzqgl3wofaMpybkMjbN93U+DC9YkZlm9xXpxBIBzZnu+5utnnUMLkuupde6pNT+7A56KA2fvshyfCdbTuLQIq2a2WMzdPTyml3Ws5TxceUBodfLa8wVzzM+i8SyA/r43/L1L8PgEmH31pRQYKcao6cJ
PJVd45Cc/+bye86MnrgF6OenxRp4AxMowioho7tVUYq4AYK5uZlbcRhMhcOBdGe/6/V2/vx/7EigSp5TqDdWh+m6IiMiZWVURyBGc0AjQBHAKrTBQQEYGQgwQOFIITIbIkDUPeezz0MopBM/TXuSBRfVar9XjJV3j83pTrHqLH93/8d4tK/7kttIp4VsdBH8kEj/e1uWns31Zz3y553oTT0Sgsx09u/sZSKFPlu0q6VZVFx+f9n5B/DHuHZfx9FWX84+aAICq5pwlqFavar2S6n3ofAsdKjteqefHuR03dUUI12+xgPjZfHyWKZdf6yF3d/OqUoupuAl4URnNDfWh37/d3d/1+16y5+gxglJAKkVFxMAZEYEoAJpSiK7qCEDkiFNwHfFiWGaqQWxOIXBgdmZCBxglD3kUiXWP5rCwE7P9Qv7ft1/H0+CIACd879Ty40/yXl9IkoPNt50vr1+rQeWcGjrQKnTiSRBaP6hu69kOLtACs4q8cIOJA5yduh4ruJzBpt59hv71CfDo5Z+853FdHj1ltSarDVhJbJURhRBijE6TbeHMVLxW89fYj4tyMvNKOCUnFQGW6M4nQWG51dqheLI9FRkmPgCzhGGOoO4KNma5P+xvH+4fhkNxo5zr5eIwDEMphZlDTGbGCiEEBXTCRRXhgISMTMRMjESEjAYYoGGmyBQJIyEFUrdRxho0iaduVDgF5ceUaD56NM2dIcDjfcRTmeTJUemlr5QQWAD3KR0aEedQG19Q8eycc3CHE3xY7lMRIISwvEVdliesQD5bJ9eLcmQcp0otIvKRuZ0zTXoab8/nt36Qz2bTs5N9pb+HEMxMYbIO1V/PcHVtJlujwSKS4ermZw86I/nrrTq75H2+xvX50wlMNVa4mPR5PIzDIY/IxDpZ1UBtHEcRCSGEEESkXsUO5s4AhgCIzFztX0izIWHeGiZkphA51tOq4fEUyh+v6mMEWL/mAnO+dpytFucPHxV23nfR2VIfCe5TkhicIh6cbuKTd67h01X6r1J0vX8I4QjoAE7zNUTVKG6IiG5sUClJtc+YCSKiozvZtAnVgAMMQOAIgG4Aj6w58/DJHU4Ok8hQt9O0ICISe7WJKRR1dRMDEQAgYMIQQSUSpBQbjpGZANgcARggVEFGHAAdwRAcqEbnGwKDLBu4kHmxGpjjju7kiNUsZ4juKFjFqsm4aRG8ep0RwNAQnRjc3Vzc0IIDA5KDebWZEyO6AZq47HW8zf3b4f6m9EOA5qKl3nPOVTOjSAhk5IICEQxMTY2VyBGxZiaEdKmqThhi5ClWXhMyujeUutS0HANSJI4aIIfxEtCN0QNAcE/OAZmAqAgiAQYh0wmNAQF3D33btuSoqhBwHDMiXm4v/Jj7oVXmNzNVixRADRADBQMXUzHDOaguYBXQVq4e0OqKcXBzq0o0AGj1t8wWFHdzVXMvcmIyWejXkkByNtgRcIrVn7mGu3skdncrAjPVrq6KsEgvE9w/hT1npPp9xPt9KPg0ApzSyJO70RHLxUzd1U1ViymAByJmTikBGBHZCsWqPKruTwRUrJ77mKKfcV93R6sKzcw3ZluHmRkaVufO6UK5u6spKEzha4ZWgQYUvJg7grvnnEWEiBjYzOaMhklMjwA1g6dShCWfY9mmREFEFHwt2ABASinF1MSUQmTAGhvCzK4GlZRMBpvJhUQxmJmIFhExdarkgNK2Q+aSs6ITEjKr6iCFlicuBhIEYKoiPZkZqrvXuLf1Cp9t7pmZ8uy0J+WohcY/Jv+PH3F2wu/lV+cIsNZilysmVrQ6zU8TAOb3Or7h46mczcBPRdLj2zIBgDqoW9HqH3Uzyyoi4ujIIcbAjKqlXmJwXANHW9j+ZKRGBLAlE+sxAkwLBADuuLCs+YhaFVGmrWKkigCVNfPC1GcWchRSZ4onZgauYEBoZsMw5FGYOTIWM/cpG2uCdcSKABVJAhIRzNCJABApIGJwh5UjnIhijCmlpmkSBwZE8xr
tg9OLQFUz1R3RwU1EJomwicGDghu4gQOTofdaTJRSxICebdTiqlX1QiQFNTee5ZRqTqobMWvV8xY/Ajg81UOehJPHsPH4kjMStkKD4yXvE1DXI8AKHR//jEcwf0JRXuPA++5wNvUnkfvsiILXeIEK/VlE3cxRwN2MwRMihWCgpRRDMRODBADkYGbgix3AAGimLRUuj0BzQqXUcNbv4WjfcACwKgTOdGuhBI7IgJMJEKYMQzRHxroJDo7IiIaIjiBa00p8yEVEYowOVsq40P6JD1SQWik2tBoAQEg1TAgBqxYUkKraUAMoAgc0hxqBbIbOCIALChEagCPsyxhjjE3TdI0jjiXvh34c8ygDM+cyqqoJB2I1ISAthV2SawghECNjpTPpuFZeQxfW2738PR7E4/o/BZBHqMBHavHZPdf7eHzQ6eb+3hHOYyqegu81B3hSBEI8hkCfxdm/jwOsQwnWb+UE7uSuai6mWbSaER2mUHud1F+asj9J1c1dHVgBwAHBq/cUABwUkAFqFDXQ7IifXsF88v0uLHg+sExJXYloAnf3GR+AiHxlzkNzcrATcxAZeIW2errgMfuZmRkQ85RqXB31VWclMxWROS/xZJHPrMbm5MCBK21uYqzQWRP2wLyKLITISEQEk7kU3TFtOmTSQAYmZvsy3Pe7vu970a7rRMacc29CRK6FkXLODNikuGm7zWbThAhQJbzlfVdbTIhyEtKyosdHGWkNGyewtgIteIQDj5nAGbifYcj7oo/qOHGEPR4TWVoHA74HQ445AI8Q7zF3A4CaYI6zbnQ8GcjdpAr96upW3NQNERUBHcW8qCAnd1fwYlqjrGYDWKikD4ENfHHF17/T/M2pZkguwfGLO7MGzc/Csrs7TZRYwQmgxjPi4rgxN/BFQyBE9RpcSWv508CRSN1kDtxAxGopQCIkcoCaiIizAUpKqR9qpAbN7mpGcua18Z6IIocUYowxhMBIgI5e88YQJg/N9FBFAAQjiF3XSz70+0MZ+3E85GF3OIzj6DFeMEgu/bAPAxFgzhldy5hjCJumvdzmDHax2YQQAHHUKYiNGMlhjvMGIgKbgvPri8wWYTmDijrW+UzwFAlfgzs8Bf0zJJ/cfKGz74uPDGfIt1g/bZX8doKVK950Qpbw+OvjeT9GsLU7+uw1xE1tEkmLmzqoo7ogoqOhGguGYEBIGEqRYipuiEiARoZadb4pFLFmQfks/BCgnYlhfvTmHKF/HuZe9QFCNPCqky47M6FuxR0AQAzVsozgbkTkSFPWO4FkKaU4ADKrmpiYGdBsp6tGiRAWhuCz/s2ADJPfPaVEImYmdgxrwdmiNYVjzWhGRCiC4MAzs0JQdHW/u7/djf397nDIwyAlq4wll1JAkxDknIe+b2NC9P1ul4fRpXRtWz16hzLeHdqLbtM0DVYdBiBUEoNYy3dUZanO2Rf7gZnaESHXHxbAOyPwT5LmM6A/ObK86LwsdbwXAWgV67L2Eq+ZL5wCvb9fp0bEMw7g79EBFvvGmQy2QJ4YKLgjGaiBF3UiB3R0E58CbZnZshiomTlSRX9nR0ebU1+mNaKKBjPXmkAWAcCOeq9jrWtSk1wrAmCV6ib4rsahaUcR0Ku790gLQuDpLYjMwNzYycDcXEyLapXzTU2LPGbcVR+oViCcBTmcc6wRMMaINcGvuKyykSbwUnUzUgdzYq5CihOCGZIbOiCIuwN+9tUXg5S+SKnauXtxK245D0owjmMZMwSOyKPKvj+AO6dY3PZ52PcH3j0cLi+vLi9D00XwSAjkAbEuHBzD7vFsW3X2e5yR1qN/ZgV7C98+A4/vP3IGkIv69OQ4QYD1Tc0M8Rg4sYB1tU4sdzziySo6+mxTJzlgxWRw8tQc/bWV5pkZhRQcBFd+VgRHNHURMS3kTuiFCQGkFDAYS0kpEwFAQEdCJIYaeOluk9rl01PAbIk1XWhPXXRmVrcamB5iUNKSszMaArhZhUIHc6s5DzGEGIOqllz
IoWmapmkKVEpT/Z3mju5GwMw2PpSxFGZu2zYPxsabwGRTxlxKaak1FEK43GzHcUSHrutqRBAzX2y2IUUzG8dRVQmQajkMc3R3NUNjYkTk6iY3xcBZimvpYtx2bXG7efvm25t3tw/3mAKmGMAHKUWKmAJh1273+72IdF1rZrf7vRRttxtyAOLd2BMgA4YQ9OFhPwxDiK9evXqx6RBpzIVtkta4BjM5mk1+BkQEJvZj4h6tsgWX7VgI5cQJVW0VlrYoS7Ai/7gyHNkqC56ZY4wVaGG142uec+IJXsMuEa1MH+v5vQ+Xvm88EqPef9okNVaoJau0yz3E6MaCiCoIjE4Ihobqqm5FNQQjMgOqJonA6FanvXrzyaJ/fIu6UrXelhGYuro5oLsBoTNlK5WPVzMNARIiIKobu2tNgUUwAIWKzzjHI6ETVvOnghVTscmboaZm4I4+W/3P5AE0DyFILtWsjg619AsiljFrkTPhZxmBmZDYALxyHc+oHENsGm7SIY8P/eF2/9DncTQJwCmwz1FxlfOIOQFGDoETE6gqQw28QAYkRDORIiWPo5SAZIExhVqfKyIBYY15Xkm1U4RYZcGPwWAtGpyJG37kJLD+leYKP49Z6B87ApzC/ZolzbA4QcgsGj19oyfhGxH9NHlgGWeca3lPcgBz9CkFtSYSIkDgZCbuiuA8Qwm4q4toFmvUPQIAGABX4CME0+XhXr18hGizvltNQ0AI5sik7gIuUP2T5gCCnlXqNjAgI0VmRgIkgeplJkBwQvSptg8AoE3eDAQEYhcxcFErIsVUxXUpq4aYujbxlMVWGWOlfIxUmGs9lVrypSLKkHMpxdVg5s/VwrMs5orfO5hny92mTdtOAR8eHt7c3dzvd1nFAShwCAHNYmHgwCmmlPaH7CEiYpsaRGQH4VDjVavZp5SSIZdxyDmP7sXUmZj5+YVvm45jAq8c06pOvEiYdSd4Velt2f1F/KNVoibMCFDBHU7J6FpEfxKQ1k/5HtwIj+WWI7yegjKe1hV6PBZh6YyXwSnon036CPo1TAqAnNCk0u91kg3AsSoRIRJwQBoQRFWtAHRA6IgITshEhOZINhWRqKb5qUwDymSCmY1bhO6g6KKujOKWJYupqg42BasRYGBWD5FDICegAGAIRIhEZlZMTSEhExG4IzA4uIuBm01JPGo2B/1Ps2qaJoTgojnnyrITB0Q0UWYmwCbGJqU6ARNdAqJoZbGAGhqA7MFpDr10METg1HKKBb0fxtv9w8NhL24cQmjSsWgkgDVNII4x6ghNCsRYhYds4KxE1HWdu5tLDddj5pwHVS1D2Y/D7e6BKTJSRCLkUHMiVuCEUDVkrK6YRSU4g7o1+NZB1elxKrrg7CpZU1Jc+WfPsOv7EOAcfNdfTw8+SeOfRIPlw1p8enyfNQ4cH2pUZRd0qNJ3/VVU3dQM2AEdGDiwg5C7F9MiYqiz2IbIU6DJ2Yzr2oADz46xygbMQdBFtIAJWDWJZCkiAnHW/h2isag2rIEYG0RFRGSqZQsd3NUshLrfhGiGru5irm7VVFU50CS2KE4+CkSdyyUs8quqMlJoU60K6nMeXJwBYr2kBLDUZaoLi3Mw9uWr5yLycDjcPeweDvvsaoRE1IS2Kg/M3DTdwu2vm02dBhCICLEjJg7YNK2ZqR1LEGC1LUYi4qJyGIeGQwDk2MbA86YiAyjCEkWGc0b4+b6cKgNraH58pq9CmqvQv0aDNZF9jGlnIzx+AJwiU51ZDaN/313gFD2O23BqHl2PM9503MtaS8KOHhB2MkaTAlbPIQJiJAYHTuoPWlSiuLuh4VTODMzEvRKiKcC+pu3TpJkC4wkOiOigJUvJplnKfhxyzuKWKPpyvpmiikhEUtUSo8amCbHWXpy5NpoBghmCuqmpmSm6iNRNry4w1kjkjlPSfXUCLPSiSvkpxE3bNk0TiFU1ICk4huDuZVX8o9LjlFLkwMwIWC25lX7Frt3f3r67v725e8hSHBG
ZDDyGWLOWW44pBHTQIqLStpuqkxSV7GOoAliKIZCY5mwGIjVxX8iRustLV3NAMR1ybikIRyICQDSfPfFus4+2xuuvKfq0748K55zB23JV/WlJfseV43yR1f9oDvAk+MIC/SuF+H03WkD8sSPshMCvSNca+o+LMl/u1QA/z4uRnBx1quockAjdEEXE1YaSa2YtmiFwpaBQFWIEJJuylOdMGVj8VvMExHTI434csmlRqYV6kEhM66zMgBHVlRyyQ9/3bWq0FWu7igO12huaO4GpzggwZQYUFTGtAhlhQJT6jjX+eRGCpxIYKO4eQqhg7e7kAJX187zf80rGGKttFHGqTkBERFx1g7uH+ze3N2/evdv3A8eY2gYRVS3GGDm0sW1DJEBXM8AAWCtOA8CYM6nHOoeUhtyr192hgGTM9aExJVMlm8yXWaTiJKBPRtrqPl+Zj5+EtzO/0AItcurwOuMAi4HIlzoRfoJav18HeAymxysRjozpD9aw1+C+xtqzcTanE2Jw+qz6zjUABpjJJzsXATBMtLNCv0+hoHgMInCoeTNL5aJJE5jJfx2GU03wcRxHldGkqAAiM6l7FbQBQMzQvVrZdcxahBEjh4AEVZLG6dXMvYaXuXsV+vVozjvif604S6sk4xhjIFYWLbJEyJkZzAkcBeaqjDNtqnEQWkQcAhIwEVEKMYUYQvjV29++vXl3v3so6psUuVYSQK3W2G27bTh4UYUCBtCEGGKV/kFNmANxapq2bYfxUH11dcKRIhHFGA3VK/1eCTDM7FALxWMNW6ov/BiQFmq4lgtgbf9ZyTBr/FnQ4ExDgD+WAzDOSmIF9yWjDADdoLpycHoFBEiBK0+rUuBEtKZnV/vi2p4FNBcyqEUvJgkS6bGjrX7VZCKg6IZYFEZEAVVABUPyGEIAiDExBxIrkgNbP+yZYV96LDHFDXN8yJJCNFFwj4aJMNTEHbABhYhAqxMWOLAgFNO969sy3Ay9gQdixhgAg3BPY800AUJzEFFxcTCMFmHsBz+YPO+2l6nrYgoUdiwIwFDlegdXcx3Rd5pvfbhHQWZDHTwbKDNtmYNqCvH5s+cmampYNHIa2KKRiQO5GmgNZyPK46hO6kQGCBQ5RCMYTYbhYnvZGHopqeOX220g3u12vxvGh1EKNxfbrkstZ0hAXdhcwuYSN9d8wUiCpdg4hXmHqKJi2oBDDDW0EE1DCP0wiAgFDqk1syGPLpIoGWuigAY564uuvdhcTCwLHMCQHM0QlcDdXQmnqFE3UAuAxBw5wJwsT+5MpItInAsBRFjy9ZYQ4BnYwQncwWdkyZXR1kIubuoGtSImTFLyZBlXqJl03zvWmsBy5AyDYWXV+X4RaEFHe2RMPROBjgbjKrCDuylVBXc+ofJXdTOdJBp1FLdKbEcvLo7qTkoOtaoHG9ag/OovV/Aqu4tb3/elFDsy24nqGzi5myo42mrmlWnknHuFBBSAAjMRTWZf81MrGri7iWopCI48uQIjhxrAk2IKSBjB3ckn91AIiUKoy1V0IrRNSiJCPrEUEUFzQ61p9Y7AMSDTWMpoYz8OMmYEqHGfTWqiYwLqYlMzBxbtbhHBOQREdAWZilg51PhcszpnQCylmBk6pBDbbasiyQlEyaQKn+LQUJhWYCb8S+J4hYkFjhnAV0ot0LEE8hrYFoHiD5HDzU4KFh1v/ujM31Me/bGstr7JGfF+/GH9+UxWWyyqC+gvyDYFmnu15yt4FcGNkQAMMdDKQVFFdENQdzNQcSdTMUYyEVdzoNBycoAKtdOyel16M1OwUsr9fjdKdndDqOYjrZGp1dTt4OLmPvF1d1N1dVUHUnZIxG1MU0HPukpVZnUgB0ZsQkTzMmZyD6kqoNzE2LZttT8GpOr9ACdCBHekAADZZO37jDXojM3MqllMHZCAKGQpiBhbNsK7w2489H3fj8Ogqg2FxIGR2IGZUwhd16WYQgjok+5RoZBq0iA4gQVDIwAiBRARQ6gpDTLX4Wu
aZtN1kktwdCguvlhyiKimRCMguRuAufvcL2Da9EfkkogWGmRWqfcpRM1C5vvgdibWJ4oEzUUSHl/2ezjAcos1E3jy1/WR9Yc1cMMKrd/3KzoQAiPWiDCudjRwMkd0RKDZJzfhIrMjEwZHEnUHZSd1Q2Qx8GIYYLINMCOCF1eAapZAd3MbtYw5jyUrOjAhgpqDA6EXVaGJY6ubrkz4UpTMgwG5JbVsLoBGjFbj5IGmAGlgJCbYtB0TuSgEJsBEDAQpxCamqtMf38hdazMVP9ZFi8Q1LaaGsjKSMxlOhSiJp0kmstg2yNzfHW4e7oZhQILIoYkpENeSNolDZQiRQ30oYg1SAKrZDYRExMDI7IwCXkyKSCmlqNStpTkFhw3AMQACs3OI9d8qs7Dy6sls7+40qUGLX+BJ6HKfysksHti68meF4h6PxZW2El4IEeYkYKhKoM+y+veVRnwS6M+40jLORKAzwv/k5eegX60BgAYYgRghEaqTAYBbwMn0SeCEToC1aGhMbWycuAGM5lP1AnMUA1E3FXeWqOIQ3KlWWJhyUaFKNGPOh3FQcEAkJq3KQU08MFe0GiBUTG3hUURZCjsisRMaoSIIgxDElXCHAAHJySNAw4HUXTQ4djHVN23bFqY0SyBAnsFi2khDcCcHJkIEYELEQAREgVAEa+I8z/GkQEghUIrmftCy1yF7bkIKzE2IkUPi0HG86raX3SZxCDAlfuNc2wYRRbW6uBGRmCBwllxKGaWM41hUQghN01TTUyBCNTInIgYy5jBzALMlP8zmOFBzdzsFksmdtyTBIrj5OpN/XcjMZ9vu9+MAnHqxELFaxipvXgMhfo8ItAiFT4LydNdTTFh/XT6ciVK4ck88Jv8AgIAMyITJuThEQDdFZGRHdDYkxIDEgRCCx9jipSjG1DIFQ0JHRwCcwtpykYZslNSoFmZyhYAINUPRAbyojSX34wCIwFgVA0YERjNVFWWvATylFjBEICIGVDdCrrBSwEbXbDq6pmnpj3vGiIGQDUAUijLRpmlr3dyuaWuattWo4hAYiWYzrhmgqrMbAtnka4tcm0FFYcmYReeqKiLMKGD7sc853/b3uzyoKghX7xMBJA6bpt12Xde0qLbAVgWOSbA0m6QURGRyQhHZ7fc556Jic1maGq9BgOyASA0HdFMx9+rx8IBUEaAScvMamOo6BWtU0MepguqqUM+ELjNa4lEJreu5lIh9GgcWeKN1UX5Y7nIezfZ9CLD8Xe77pBrxPdAPq2jnNaNY7nmGAwBAbgaVHFIiMEBHIHdiQgck53nPOBCYdHErYhQicAAnBWdkZnQ1Q6ypYoubRMEjkbo61gg2F9NRyjCOjqBuxVxMkUNkNrWiKg4GXoP1i+myvokDIAr4aIIF93lo8kApXsQWp8B4ZJ8MYUAYQ2CkAJiQmxDrykymD3NDi8w17KciQH0QuQdnRBR0clc3cGciZg7MhJ7zREoMNKTo7ne7u4fdroY8QCAUQ3SKEIEicZqs/kpI5FNg35owgVWH+lTUQ1X7cbh9uB9LVvcQJyOpiaKDRyJzAqzFipCYAM3MRGudqyr5iJn5VKRXEIIBMjvUlpUENCV5VuiXCv/VhkMIcuLfrUrE93CAhbYuWaD13Dmg6ByAf48ItMDxkwjwPeOMb6wx5OzX858AauVXBiRwRuCpCQMTgKvgTBuYHCxE5jDmWufdFKbinAiEzHOLSEQEpprDW1+s6rI1hFNVs4qwm3ldfaejw8VtSi/xWu/QJtDsUusO5j4WMfUQYl9KkAJN56etARkQkLrUNBwCcSBmQDPTIedgTdM4OcGUYOhqXjObzao4V6kgEYgZOWkuAYk5BCYMcUrfIQ+pi01S0P5h6CUremgTx8gDpJQ2selSs01tEyMjmWiK/JjDuzsQevXqERqAuA3juO8P6l4dDjwHGJMDcnBRAuSIhJNQWpe3qNZbzv6ZyWNlZspIs5sVVxEQMNtIaipflfwcjvkDj6Hl8Vi
LN/MmrrwKj0Y4uwZmqr+21TzGAZzsdGHx1NRiVfVtl1daf16cU5Xlrf0AaxaBAAHIAQopOpI7E1TvjmpxJwJXlZwzMHEMrnmz2ai6FKtokHN294BTX87AVIMVqs84Ii+PLqWMkoFwu93u93cHGRWp6boYY8lipk3T9HqoQTg4iwpLwGYeRgJoU4OAu6HXOyhgzwyeXz9TFVNpuo3moiIXV8/vb99qkW3XvXr2nDk8HB5sbrBJ83pWNQCRGElBsymohRgAsahWl08ItWSTMTGGyEhqxcCZuE2xFx+GIecc2hhiLKVctxcppTY1XUhNjG1IAdBVx35oUgopBSR1KDghdtM0WQUAYpMEfNz3+6HPIi8/eFUXTVUdICEzEbgHpLZtu9QQoIfoajnnFMJYShVXFt5ba8wQnSDe9IEoYO0obIg41ZhQRcQ4d6tYOMDCHuu1PBcCrIayJVDKp2gAqLkJNdcC1rzu+5XgBe7PxJsaVIxLiMSCZadX/d7xvtPMxBxrzWeuaAYu4OTuMEUuGIKBGhKhNSEqhoxiepwwA2oRd+OAMdRUcWAke4pCwOy8BAERgXEkAJqzCgNxNa5O7liHSNxUi6eauyMTOIxStD+I2+vQbKVEqPFFUkXanDMjdanJACklJApEkzpoNhlfahaCGcPk82J0J69BBZOsDBCnpjgAZmBGpgDIgGYy9P1h6MdxVBUrAIhO2GDoYtOltokxUWBEdjRAJKzVU/zYQmnSJWKKgemQh3f3t7cP947w4tXLqqdWVzov5Ntrww+1YCFEIjI2k9KX7O5LUkDN0q73V9MAILi4hpGqdwpg0n0nGeAcMNaTPDsOT8km+AfoyvAH9giDFRNYuATN6TywcvngU46ws5usIW+54XJE3WxZGiIGM0eGKtIjoKlDcHcEA1VkpskNXcuogCIiOmKRHBFSbLoUU2BmInTGKRC6aklzSAUCQAiBRqxVq1CtDTE4AgBzZHYSU9NqOyUKISRGJgruDhTQwUH2w7gfxvvN5cXFxWXTMVOtekVEIsKIbdt6jE1MZSogAtWdRAGJcfla4/6JyCfPOc1JzYi1alBNfZJSoSUQIQMS7Yb+sNurlAku3WNKHbeX3WbbbSKHQBSBEKoqddwCWHmLAiESFbPDMOz3+8M4QOCuax4OewZ0B6wRQVSrTgAtrZpDYGYTGbTkUhDxmAfvvjjB7PhEByRDVNBqn7dV5hfMzfbW4wygJ9BfHT+KOrPcDwCLRPrk+D1+gLX49YeftvCgs3POqO/62rPPTsgIDmRAZm6mizQJVXZ3xurVNUOAQGxkqK4mYNV6YCGmTZu6JjXMhE5uiOaGs3USI3OQKZc8cSAiEy1eCDAAEjI6EEFAcg4KKC7iBuaulkVnDosUGIXLoOM43u4ftvvtJjUxRDMgpgBUPUdd26I7Eck4VjGspjVOrihEsFqY8ZjxhDNn8Jr7S1ihH0ytVnslToFDCKFJ4ziiaiSGhEYYInddc0HdRbvZbraBCMWqV50rfMAxeC7MESsc+TD0t7uHfX9ApnbTDVJ2/aGUUgDIgQwiUgjIjMyckGtBrioDZxFx08nCPAXDzuYfAICaCm5oTks/Kte6qj5HT1X4f2Q78fVYhJlT5nAUW2aUnlWKPxIB1uLBKYAesW05vtZOzj6sBa/HHAAfazZEAI7oNZ6RwcGyu+ec1cXMCEEBCwhacAAzZw5EEIjFi892lTaFNsU2xRRDQAMTNzdXxFQrLACSky9OKHIMxCEEHUcroiSKDmaFAQBqwAIRQc5WRDADgKvW8mz1ddRtyOO7+7sQQhfTq4vnPCfEVV2oaVtwF0SbOwC4+5JvWeMOplhX4iwKtdRXTW1Qc6yVvwRgKqc6JamFkFJKqYmBA3ETIhMoWYypTU3Hbdc0XUzkYKTgtV7AihtXlWyOOhtEcs45Z3UjZgIrKof+gERY/YcGgack5jY1KFqtogZ+GPq+78WFiETVasWaGgU4c4AGCBGr3LWu4Fk
L+q370fspoTwF/iME2qkItKbCR+vq+/IY/0AOcAbNZz8tXxfuc4a1Zyi0CFFnd5tOY0dDRzcEJ3Q1Vc1F+nFwV0TkQGpIjsUUrDA0SAbIbmKqpiWEkAI1MbUpMFOoVNTN3dwUa6UoREaGOY+kltyJMW6attJaVR1VvMgYpkCamjmlUzsjrRBcq+RWw0UNSn13e5OHYRPSVXMRKJh64gAIHGPXdS6Sy2hmHGOM0UTY5yoBSBgwEi89j6uvwMGrdDyZ52cQqcpwoKloikqpERkGjmyBY0ypCbELKRDXuhluVrHdZt/ZFAtEtFRCv7+/N/AYYwDb9/vd2OeSYd4dZgpM1YFdKxHpmKuxX0T2Q3/oD46WUjI3cRUT0RqoO8k5TLGWP1AiAid3cQsGNex8gm+cKievAfpJHcB9jus6szcejUzHVvX/GARYoHO5xWIXwnUZlfc84DGSwCkHeHLobD5TBxHJOQ95GIZDjQVoKRqQmRV3AEOMImKW+8OwH3p3jMQhUGoCISJYrS2ORObG6AKADusuIVN/VbMmxO12CwAlZ1CTXPIwZgZvLIXIIVYjpjowkq6qD9hS9YBod9g/3N2/2F795PUPkKO7OSEz17+mUkoRkYoA2T3i5D0lokgcQqhlHSqBqIpCLWnAgMyMc4onOyAYOriaFpFRwDxyKCBAzLVUbggxRnKoacSEWP1XNOfm2spdk3Ouc6udHksp4ziO4yhoRCRukTiG2HDsaCrfq6WAT5g/lWtGhJq+5y5mRVVqwS+fxEURoYp4RuoA6GGOLKy7v1Z/q0KMpykvM5LMHtXT4IjHHOD7xx+EAOtxpBynSvBjJMNTNeCxtIOnWsF0ZDIZmBOam6qISCml2j1jDOrB3W2qvWZqWYlLKfvD7rDvmVlTJOwCElVnE1Y6D0SgfnTCV9CfA7nBzDiFhqiUYqomZXqu+uKmWd6XmXOfIQZaiZ7VTF72u+GwPwy9IyAzqjMzxSgm7l7xuSLATFMZ5xWo8aFEDKcccllqRJw63HgVxx0AXE1dchm9mmiNkCjF2KSGOEwVHUVNNXGopR1gRrDl3evLjuPYNE0/DofDoR96cav1VURV3SJxNQFX5llK0VI6JK8tTQlTSmqWZXR3XcW0mVntDIOAqmpzCZwamWyIugbiU8EbHymTT44T8n8Kq98/AphXPjiN2TjAkxvI3FyWPUBKMH1gJHJAnRy9pgbutWPkCQqmqYJ2nYuDGyiZ4hzQW1XAxWl3VWIhKEQ70r2WB9cDk6Rm8F1QDAhGKkTqaCjuziUyA0C8fRi+/vLry+32+vJZ4uRqMcTIHADJnJFUxMfMQWvAL5AX14zqZAGgI85Z1GTDHDbdGFmDUwJ+e08hlKFXVEWQBjwEIYLNph9GtrwBbDF4lngYu0P+m44/vPigdJtxPzyLV8FhP4w7zjcb/A2N/QZLavNnuxdvy581V9ux+fc/8Fg07AuzheeNBtcyvMQmDV7Mv2D55oIObas39uGd/QS3N80dgBqBollgZduDiMigo6oCI4emoRghtjlGjyVpCIECIyKFQIFrZnuMsUYHZpU+5z6P2YoHvGvz3fhwV+4HHcXU3NwxBoKiARwdxPQAxoiBIjMzcuratNmollFGQwF0QzVVQSjkI/qIoB44Bmb+IDZLJAUAKoCjG0CRUqU+qjGpczWnQeY+zUyO4DrV1LBVh54jqoCXObLJDHKeekEwkrsREgdAkVqdLsYahHuKJWeIeIZJR7FhBll4ivb/IWMtmS239TkaOasMJQ9W+pyHXFQVaO656e7u5FMp8+p6HIbh9vb2iy++6Lru4mITY3x5faXuwV3cGHz26q7qusGJ4FirIwSqzkhUtxCCqrbPrmuDRz0cjBBjCNwEYndX5lo1vyoVwESBO+Lqah2Goc9jB4iMATA6x0Hu73cw6HNqtoUe3r65f7d7/tFPm6a5yfc9qVhphK9jG42VRRgZeaOAB1XBEEJuuGkad8+a1VxFoBaNQSy
l1PhWmu1IU50fkUgcYqxtm2quPccYm0ZMi4rM5eRFZCzlm/u3fd+XIjXa0N1VtJQCAFXUAXOYI4UYsOnaRIxulcnUEkYMYIFI3dmVzB0VAYkDB1/B0qTq1q9MS41Kd1cEXjSPOYl+bSd9DHvTT++BN8RjnuAUyETk7mGR4+stlh5jT4o0sHKBnUnA/wgEqLNZh/sBgCKIaq/lUIZdGfdSctHFJjhf7FCL6XuN8ZBx7N+8efPr3/4mRX52dXGx7S42LSIwAtQ4LCKq1muoVjYDOzbfJiIGDoRWSzkb1/129zZxERlKRvfqjWIDFOPABuRzBTslcCZP4dJD56hl3O/3D+2e2hY5BKV4KM9ykN7TwC+sxbvbu198qe/22w+7q3/6o7tNY1uSpomjbEIqu94SDQhkfDFqU1QKItN3OF4V5RBiaFQGUdOi5l5UchZEZAxWwYkjMxMFG8RjYiSYk4mRsNbAG6WMOWcp2XWQsh+HYRjuD/elFHWPMQJNGaeqWjusqGq1zgTimnRcc2sCQFFDsYgEDAA0SmE3FkM1ECMEBmcGRPS19jjDwySS1QrbZkS1ZN6EErVA2KQJ4GS98urN9RM+cKw79wiGK8jV2jNVrFXVIwd4EqV8TuZfo8GCAMs51a73JKC/57ZwVBzxuBSIKK69S1/ybux3ZRxUS62DQkRe805xyat0qEkCkwny9vadmX393bcvP3x5eXlpmw12GJFwbhlQ2+fMrk9bI0AAnkztAMpuOCGAmyNw4oaIOAYgqrUQIxKZq5qgGFFxU0YLtCkY3SWXh353N27bJiZgLNqaf2TtVjZhkMsDvvviXn/7tsnW/7tf4Mvn/kHoGQcdIQu3ZIiQQjFBhU0BGjFjeJOH39y9fbbbvXj18ur5VQipqEuRIY9D7t2dOdZO2j71mwroQFVNduEadwwA5MZ++3Br4LUCwCGP+/7wcNiP48jMRcXFshQAyCK11vwCrLUES+KQUmpiakPsKCB5dktAjgHJ1c1AAYwdo6N6LRWGcfZv+qKhIqCDzQhgtZwqTO4rmjVdm2vHwyn5x1UViTMYW85Zfn2ScP+ejLA1HOMppH7PyU+jweksj1i71vrdB9Cs0pc8SBm1ejuZCEEdAWuFwBQCcahFkGcLJqcUKHC/H27ub968e3t9fQ0EKSWKCQCW7u010GvyzcyRS8zACGqAXkMPPBJbiO4+lgyODFzj4AkwY6mpMOIgagKCiIOWDKaMaXA2H2F8198/Hy8v4YrAcbQXqd3f3Y+/vcXbIQwcv7jfvBuvuPn6t9/R13ebi2dvY+5Bo9Lg0nSNENhhJLHGEIEAaXf/8F9+8V/wi69+9rOf/eSf/PTy6soJ3DFL2R2Gtm251soAgppT5GBuKURXK2OG2lMMsZi6637sOUZjLGK7sb/d3e8Oh6Ki7UlmetVgGbDmSEze6Op8CLFJKTgGYkDjKdMzAJgX26SmKBMwTW11KIREHNYiNMx2TJxqFwA61gq+U5sT9+oGUbclnI6mnm1TvwifCkEcO67CI4vLpAnMRQYqT6s/Hduk+qOIufdJQTDLZPgo3P9p0H+KA5zdfEHTDDaCZbRsml0VkAgBCVSJKITQhNjEFEIwUHcHjojYNM2zZ1cvP3yJ71Dc7u7v9+Nhk7vRhI3FPIAHZqyLN1VtQDObc8U41Dqe6lJxgNlrHWZGyFmLuBoUjSml1FpIZoZiTlbMMmhWya6KRqbmvs/yZk/PxmcvUBrEpHqh/PYfvnr7t79s70uLXfz2/mKvEbL6nf3u7Q9+/BrNbxpvN+FmPFxyaj2kbGpQAgqAgD/c3X3+n39+95vfHA5DLvrhp6+vXl7HTWKKtaBe9UyAOdZ8OgomGgKIlKzFXau7yt2N0ciR3QlGKPt8uB92hzKYWT7UdMcQOag7igD44rkLRIGYavjD7D92UFBXraF7DECG7gCBOLJqiG6g1fi08NsZASbr54JyhARzdURwdytzMGntpwdQK90AAHhNcMW
l8McJ6OIj89GiZy6tGIgoLFGfE/SfFjb0OSVnGbZIZTXiG+rio58FAeH873vHGuUmTodgBAJewOYcXKNZTwjMTUptRQCXGlur5k0Kr169+uEPf9htN5eXF5hY3RW9uA1aoCgjNQiJw0InFsNynUGkYOCkighUox0jRuJiGIiEChvEEBoKKUZENPeABAAHzTUsQkzVvRpS+5wF/TrvP3G7JmiQ3n3+zXc//0y/eNdZC1nxpk+9mJbC48OvfvfBDz99duf8ehs/vfr27Zumu+oAYjFI8Z79FoUU+4f98JuvD/v+y8++KKKvb9/96M9+8tEPPq5l423yHWFt4DxRO3ORUoWH7D6WXFSAKcSoBFnLWOThsN/lIZsqAjDV3krqDrPIQUQxcNe2kUPilJADUkAKjmBuQcVUSz4MvZQM3IRAs6Y7oQ0z12AWcGXkJRwUnrSVEwDU+q1T2+Z5o45G2zPSDusgGjsxta9PWNROOIYJnTbKPoHI95N/XwK5norn+QPHYyFqmuvcxMoRFFzNCcwQGWpgGFVfKRObz21hzJjp8uri9esPQwqxbbqug8hGaOBZxFQiUAjBI4NrTY03B51xdkqwNA1znHaNiSdARmXEFGJ0TMgNh+o8jjFCrco4GktZ5k8MJnbQMo72UHIP6syM9Pkvf7v76t01pk+2z3L/MO4Hz2Kqm6tm95uv9n/zy3f4UP7s9cv4T6kfr6472o2k7ol3yb51aQxMJNwN28uLcSxffPFFL+Pm2cWL1y8xBlUVm3L2eRYwGAGRRIpNLQJ8LHkomVLsEonaoGU/9Pf9vi+jMdRA5RCanLMUraUf3D3Gpk1N27YNhcgpIrFBzWoIgAYgZqOWIQ+iOVpgpJqNgeCungiNEIwnD7zDUh2nQtli/FkDxrKealOzXT+FlunD6vgaPh+LQDiX36tyBM7dgsOCCjNyzAgwx1UvLoKFg8BKJVgesAhFa36CiDpzGDdbikMt9fHWd65DtNRGEnpaK7Oa9VLNuQ4hcXAncQPXTYqHMgbCl6+epy4ix267SW3T5/Gb776LSF3TPru4ZGY1k1o1HAGRAgd0A1O3iRczcxsiMxt4znlQTSGAOamzQ81LjsRMtOk21Q5Sq8cdxgHF3BSIxiKOAMSjlBBTjM3Nt1+yIg6Cg2c57O/u0S126eHhISnY3eHNf/6ltz7sdh/+kz+7vHzW7iWmVpm/8vEbyXDV4b0cdns9DHvA1x9/dCj97mGPyCI2ln2z6bquqz7Urmu62EouY1EiIjCKJCK7/jCWQk3kiKPlt3e3FFgRICIUENVcsphGbRCRAoOhK9RWAMy87TYBKUIgr42HMYbQNW2WnlBDjNtnV1oyBRRTMXUEDByYjJEdJBcEB+aAVCOo3acgPENw8KoE6yqPeBHc10cqczvjAOvRpU5XvIuI1mUna9G7NR/4PeHQj+n0up0LriaxNCTC5afZguSz1I9znOPUO2r96zzW+LAciUDkyIhT5G3NCUEMjk7oiKQemDdNq24CTgTuql6p+MrwbwYBwQEBrFrQp6ARQFjI0lTamAAjUCJCcEBngwAYoHakQDfrUhNSDCHknEceQoMU+P7dmzY14FSGsv/u/t3n3/z00+sfvfrk//XZvx3vHq6tA3IDFRAnhIbaoWTRpPAydO3et796Ey6SPO/hhx/0Deq2aygO/Xi4vd/v99WUU/eypqUDIjiEEJCRkRBBRLIPHFonNlEFBzet4R8pKPjDcBj38tAfvGYz59znsfZHyiKJCwaOM2DULsYVaBygGn4JgB1jjePeNGZWyihzHiMwMrKqsh+bvhBj7VJItZPPHFbt7lOgFrp77VM+EXafW+8AIhAtIgnNsOFzpZOFD/gMML4yED2pDyyw/cclxJwhwxqC1yxs/RPgZH+lmnQCAACmcuR0cJwlAFQQr1wJzZGBkQIHKlbL86dQEwvJ3Q2BGCUXAouETQrZI6kDobqBSXFChJqVWzWcyqfqaq0FRAQI9bE
4YwQxsisDooM5ErAtwXNkZimEyBEv0c0i0u3D/TCOV+3F7bvb4dubi/biz3746cd8PXz29ruHcvjyHR0KNp2ZZc076RmKeNkO2ku+H/tntIE3D7v/79/pNt28bD559T/eB++VSP1idMkQY8TrDeXBRAGhbdumiQ5arNSe2KkJwdmKigDEREiuOspAgZ2QGg6Og5aH/e6+39cawH3J1bpSps6UKmLRIiRk5hqaWhdHVQMhmIMBOgSmwByRhGi/3x92O3QNIYAr15xVBzQFDuTABJEDACJRVJzqHgDiHCM6VXOfYbnKSEtQED5V2WqBn8ewukgNUzn6R9aa9d9/TFmURcDyR2F659APADTNY1F+KrAvfoNzDlBjg2vFcTOYYh6ZEaZeiBxq/rh5zaWSiBAQwNVEy5gLGENSREAPdDLQoSYcA4AB1CSX+roE6EQBYErnRmAiYG4QEN3J0QF4KllXWVvOGRFTCB8/f3URms8H+erNnffjlcZIVxcSt98Nw83nGbpX169/sH0eP9i8DF1DWMZLZxMzy3YtaQh2Mx6iXY13u3d3v4LA317Tj/8X/4q3hEOhftyMwNw2TbPjmjgsKVC3aSevcB4pcUdtCCECmWr1e5ipStmP/YY3ITYRQaUUGQcthzyOJT/0h34cEJFigLl6KTHP3SunzOgq37oopYCIBF53JFaft7oOWYaxiRwR0ADcqBapBkRHRmCOBdwBiSjVInBEtQIMAepURq92lgEHQCKt/TUQzaaIOJqKXjsAuDnzxCqsusNWlLru9eI6eBKMFxz4/Qhw1vd3jQNrCF6Q4cgxZ8lnEuBOw0iPjjM/QSoyJwN2YKvh786IiZgDNTFOocI+pTgQoJkGokiMamO/398/FIbUSIMbjmlx+zESIiGAutQW6mY+9VcFmJIQHb2WcvDaZ4aQAKa+53Njsdk3ibVTnRobdF3Xbq/g5Ycp29sv3x4O980efvzsxb+8/lG3K+9+8fnP7359BYEEpRyK6iijoRXpcxmfvfoxtM+/efN5XzIy9bf7iFR6GX/z9Yvu49RevMlSdj0UL8P47u72BacQQmxi13VNG2v7R5Gcy5AiB441BlB0cCPLBcgxUtOl0WQYx8OwH8tgoPt8eNjf74e+SlPuXqsMdZtnAIBAZsa1cEQggymlIRITeDCo0O9q7N4gYWo3Tera5FpEpBbrJAyAaMgGYDg1e2jiCW01cHasaWIMYLPYQnPJf9G5VQzM0o57xZwK/TSbGucK/ng2Fqhbg/4yfn9CzNlwBJiLB9YxuandCAkn94Q7TO5rFz1DFUTkqYym1+vXM6tdQQNxZGZBRopAjNREqvXva3gjWJUikcwDQsMUCM1szP2olksJqaGGmiamkKgWkS5Se3fW56p6UalmATL0iIhIPhWMIAcAZGIoZjPX8lWwrtfsXjEvUnb9tml/9PL1RxfP/uNX//6DTz96/ZOLq4HKP7z95u9/gzf9q+5y3N2olpyHrEPlzzkP43CwH3Y//MHr//ruy2G3T4wHtgTgY/n1//PffiB/9em/+PNA3TcwPowHySU5ElEbQ2gTIyBijCE0TAyjjDQApbZBdvBhKNEIHZrrTdO1Hqjf5/v97u6w249Ddq3vsmDykvg/jKOamXrtPd+EGCjWuJxAnFLCokxQayq6qg19h7xtt00TA9fuaeSuDOjEiOhAQKQ+gXvwGfRxBgkEr0lhADpra7yQVJAzEWMC99M2YSsX79HLuwhOuJI4FlCsf/8AJdiP1zwpET0ey/Ga+bA83hbLEjMuTmw8wTdyDLO1J3FQnkaq5Zgq8RZ1d2JARK4t9Ii71HRNm0Lcjw99yc+ePQshtG0bMbr6OGYoFogpTnH8RSdLHxEFpkVXxkV4Q0RErhNEMoRa5X7u31f5GIgYI4YIDXGA8L/85//tN7/6/O4XX+y+fgjf7i97u4YuHpQGgxh2QEWdEyCyGXgK+5b+8mc/fvYf/v3N/S1uu3sq0dFKufkPv2g
wRgjpJx+kJhXau9lzbouXSjuGYRiGwwYuU0oQHRFLGQeEttkgeimFgLabTehaYxz6/u7h/m73sB/6Yqo1IIqQY6gF2WsfxcShQGTmmhKwUKtqNwwhpBABiNVrsWERgT5vuq5rEzm4CQBDANDZz4WEwEDs7rWTZc2hgzmTq1qiDZzMHIHrwRUUGeDRCnSsb4U1m9Qdpr4nc3SMroRqWjVTWgiun4ro/3glGOBcfH/y19rf88jyzJZyPRVDlvDp+iwmCkahGvuZAYkr58Upd9GPanONSwFFJKKmaS63Xdu2sH/I41h3rokxQihlzJIBVYlTYABcBLOplgYazj1MjsuHSIjmwLgUJZ098O5Y66yMJbg3TWooDLv9u2+/e/iv391+9lW3049y2Apde2hGu33zHUXiiwQieRhczF36w0615C7wn3xyfXF5d/tOwO9RgkMr8qHy8Msv315d4FWEHzxTRs2FRpEoZmal7Pf7w+HwzIwZnTBG1lzqTpM7oBFR27bOXET2+/3Dw8MwDIsFcBzHWimEmWuV3FrgRDBhraWjk92i5jlMgYDMaIBuVTkws4B42W22m1ZEyjhERiYAtYpR6oiTxgtSi4TOCEBzUoeCE0zNZWs54pr2NgEooplV8WwtPy9W0TXwVCDzVQHmioe1AeFy8lE+Bwhr+z1UG8gUio2LdI41AYAZES8oqKqKIiIxIVIxdZvq/0jNXvWplJq7A5Kai2hwCAETM0LtuklmBiYuWkupEWFA2qGKQcPhRbttncaSG/NtZACg5J5AidzRzchBAAoRpYgeVUtxYObLbsPMATyAMhmhKAxDyZbSJnU48OaiG4ZB+oyE+ZDV7IOPOmBomjRKkZy5IXHrc8/I3GBwiEWhN0aiTZubuGfLm3a/63nUj3nzya67+vx+97e/DD//7ANtVRPICDKC5AcZbvKgXW6aqNb3+UBkQHE32E5DCJv47g5++/nbm2+7bbu/u38+mOz7iLzrD92m1c++vv77539y/ezbbD8Pwy9/1v347eWgFFJUCIBdwxeHXd5cbrrQGgykkneHy6Z7/ux5EyKDf1NyzvlhHPaqI8JorgRI/Or1R5fjuLt/6Pu+gF50myYmAGi5RaxluJXUo/OFwlbhw6vLi9hcpBBTKmPWUgCkSfTx9SsAMCkE0DTNRNoDAMUK5ae0Fg3nLpEwUbJQq3JM7BcrALpPgQ+DF0DywFUq0vmXQOxIVQVaKua6Ki19Bqr04m7gQlgRsp7MWJ902if4SUq/PliRYcG8tZLgs29rHZAEq89r6QhW1s/p6+w9UPCp9xNCcKu9g2KMtTZyjTwxM7dVqW1CESuqABQ4NU3XFQ1t17abGBvCwMRN0zGGJsQYG6YAhLWlKRo6ADKZGbqDeUBSZldH1YgUOYgZEtXOIUCEMYQQNgz3d4dnnK7brnvXv/3tF29/8ZV++Z2XYgKgBq5oijB1r5FSYmRzEzdxmNs2ACA+DIf7fj+qFc2DCCJgDBGjuxOAjeX2mzf2zZvrF80n22fQGytiMQjuCqoqLkpQXMh6lTGoNrFtQuhSkwxdFMDAnBwCIIjpmAcpBv4nP/rh1fbixcXV4XAoY+bqYHLAUHuiOTJEpou2u7q4vOw2282mi02MMTiygyKh+dqPecb5zwDg+AGPBs0zmXkNbwu1rqbz1ZnT+Uegn5SI+SmzjodY7XcnthlYibj1YICn9N311zNAn1o3V7H+kdFzQYDz46djeYH1StVRE0bZqIYcwpy+sOjNogLThXVmpm6ysvg2m+6iSW3b1rXD6jPiUDNuiWMt5lyT5IHQzfrD2HZOACGErrI4CIiYIN7A3hiFWUkZMARk9FYpadxKaB5K/9m7d//118Mvv053Q2cEJYM5gaMpWK6WRylj0VAT59UckczMgYBwr/owjhpwMCgMjIxiYy4bjijmfb7/7Kvy7GLzlz/5AW2374ZEHWbkBKSuuRioBy+QtRioIHAIvG3ay9hSNjUPYu7ccSN
BJAkUdwUxzXeHy8vLy83VRdoMh95FiSgQlxjBTIuAeRPi5Wb77Or6st00ITJy7eLMSCHEanCpvtUzBIDatvUREQQARF7jyfqnJ0dYyaVWS0YtQUFzStMaGQCPqZ4VM8yMALDmQJstztn69+kukU9+nV/MqNazJjwqsgv4zgE2OF8+mdlPcUDMGNHn/IZqYqwSoZnXFHuqkmMIVd+q3KCKVlX+meu9Y20S0Y/l4dAf9gM1seu2zMEdS1FyYmSqRk0nrJl1gTyQijhhLrJ7+6YWEL9+dnl1cRkxQPW3ixmCgDtKQWfAzq212GV6jpfy9c39L393+NWX5ct3YS9s5I6uI5qLCaqAm2kxmyuFg092a3dDquESkNLdOGqMZkrUsYEWz1ouiLQYDmq63//qi3Rx8eydfPjgfN2AIQG7Qh4HN0kBkMysRIZNDNuuSSESUEAgThs2d5eQWo7bpu23l6OKut3c3FBrXcsQuG1Qg1Yfn11sRMREwbyL6aLputREYjB3kzxKgCkcOhAHrG6bo67o7jiVQFzH4Ux/AU44wAI/VbJYg9kCeAsHmCwoZrW8xHJDX2rbVWVyNqTWO6jb4n7VyZ7tCscqKUf0XSj9YxxY3s2nohweZpVCZ3Foof0EsHQ1AABcNbWluVfC9JTaQM7dcUrkMXd0RMBApEQhBELgEGoYei2RoqoIAA6zqRgxsGXd7/ubd3e7vt9yACfkCE4qruhIEIgIGaDmXxsFDimqGyKMJX/z7Tdvv3uTOPz0Rz/+05/89LLbRI7oMAwH2KASONQKbKGjuBG6GBy/flN+9dX4d5/Zl++60RIF4OouVTFRKa7FVdAV0UMI6uaETgyOSjRF8hEepHzxzTc3D7shj00THYgiU4pIwYsk8Uvm4e0+//LLq/HwP0gvf46BHMnJcz7cP8hhv9kSggHBtmmvuu02RUcbTZQ4pHRVo1/At0nGTrQyPcRvu++6rosxqmrBPIVpqUqIgRiCR+IupjYmApRSTMwMAiAihxADY+2vCibwlOhyNh4fX47gKnBtfQ5OkQGzaD3bPdlBYe4SuWrH7V67OekCtIg4pavOtpOKaejHRhtPtEk9Q4lHTOCID4u5qqbYLeeQg9N0hyrGwIpCTIg0IS1UDqCzY4FrrRpHYwMAZMLADlOF8slbiVgtwIgo5gEwi9zdP+wOB0SKTeNEzIGQANB0fgxglT3MjJg4RlJ1t+L25vbmb/7jf7hou6ZpPvnkkxRizbxRVU4RponxFtKFYHM7hLfDF//mPzdvDvG7XTwYKDiYQK1mJGQFbFApaA5YK0+QuKGHKawF0QAMUAy++/bt7jDc3Nw6OsBmKDkZXsaEiAQcHSPGMPrwu7db8r9+8YP/KLeKhIA6jvfv3vr97qK7SsgxhbZpuqZJFLJpMQocY0wbjFaNb8iJoyEgEQYOMNlSSimKUycyEXkAIOLAtElNm5oAaEMuQ7ZcOo5tapsYu5DaENsQQwhSjqLOk0RzDehrcD+D9cXzdX6TuXgWzJlpAMAOinVT0FYk32sPlPl6mkuPeY0Ocpd5AjQ/6AkR6OzIMmmfZPc5zn9VD4eIwI7oCHPqENaEklNNqAalGQDN6u/SQxkAbK63A1UZCMEJa5Gx6rdy98mNgIgOYo7qu8Pw8LAHp2evXj5/8QKZEdiRHMkdVVzICDgguUu9AzNTYOZIKQ5S7vYPNcQsdZ0ijqrEAWJITGBOAo3ARZZ4u9v/5ru7377Z/eJz7KHLTMZglmsjewfGwU3AZUpxBXR3BRdViuRI1RouBiJCImCMjf7o9Z+kbQPgX335pQ49NBEIU2oICEaJRP3+Pgb+6dUz44MFpMB50Pxw8GG4wufbJjRNpBgQQEyysxNTwBAgHASnLnc+p484mQVnAAJAdGXE6mN0d/Xe3QNSE1PDAYqOY867vQw5XVw2m3jVbZsY2ZEdaA6+X0P8mt6t4WcNYOuvM30/sdPjUhF6QYD60xyvHpEmYdvdcH3CCRZ
NyKGGXCufkp4yohM/wGPQPxvujqcxPD6nWpoZ1TyGU+5BK8K/dNv2WT0gWLmUJ0uW6Sprcco4Ul3E6BqMUB9g4OYo6kMuhyFzTM+fvbx+/mIcx5wzAExB/27CyOgKntCKKVqoRck5xtgkYwrbTXd9efXqxeb5cx2yiDkHSk1yD2pxkG0P7f1gv7u5+f/99s3ff/Y6XtBoY5bgyBiYmUQgZ03FXNG1Rpa6gbipuTgER3UwAHNUt6JOYp++ePnq1Qcffvr6+uWLm92tHoab/IaIkDl1rRcZDoNzsHEkxovu8iImJug4ghNkIfVNajZtik0Q9GyaHRS4VjswgL7viQiYfFEWCZUocqAw6YW16AMgMvMlb2rV0QY5OBZRGcZx3+uYod0kDpXJuKqriRrGEx0AYJH468EFJRaIP4G3BVhpycRYIUD1aSiuGiuu+EAA1Ooac3fw2lQG+ShmVzGj8vzIXPXfgKirKp0BT8cygzPtfvEkV/eRmK7hu7q31ni8XLtqPHo0khqC5rK8JCLSrLNH5lqGCRED8+SuZyL3/dDXiWUp7BZjlFKyokt5eHhouvbjTz+5evFczJqubbrWREGNHQJQzXTJmtsIZC7jAMybtrNATrjZbvdD/89/8tcffPLJmHMK3IRWHBzIHw4vN9cvUxx/99VX//a/vPu7X8cH+UG8QAUBEDIBiuCszgAJ6NAfUkocY865qBMFMBJXDsmBHK0fxkPfQ4ivX79+9uzZf/PjP7/f7+6+fnP77ZvQxr/4yZ/2rz8advuPX30YHRkDOajaIZd+yEPJf9pc7/sHfCht6lri3e5wcJECKaG5OyemFLBzjXIQVVGAQ3/YbrfqVkppmsbAyzhyjK5iZqKiZmpGqsw8lD6EkDhQcHavglrphwDYUNikpokJ1dS8WtUynSivy9a/n5i+1+CzuEdh1nptqYMCUCsgTKL//KyqD0zAVTlJPX0WqAimhk4TgUZwd54lGl1EoPeI+09N//S4Exo4vadxzXJLr4oBTH64SrwRJpUdAHS+s8Dk9IY56NLAtTpNEABA3cQ1OoJpcXXnN+/e7g6Hy+uri6urpmkCeEjRzLSIFQmAtQQUiqkqubMBEpqrKDgBIsUYL7dXL5+/ePHs+abdwljIPUFgDz958YPhyzff/P0vh7/7wn/73Yd3gAOa7Xm7GdAHdgA1x2ROrmSyubw4HA7jWBAYgIYxA4cmdYcxby43H7x4tX12lZoOuLbkgvuv34wluwzqkndQQHMeShl/Nw6M1ISmiW0KkTlsu7jpmv/584s3D2/e5NsbyXf3+88+/+KDP//0z//8L/cyuLsVw8EbK53zVgKIl2TIVCtvAkB1qbq75Fwz76ploXIGMm0psKGWvC/7wSCF+OLi6mV30TXtNrUpRMmF1iLKP3Y8CTDffz7ZVGMaV2g0lQY5vRVVN1vVN/woZdSaLke2s7YCnU3raKuZj9cPS6XpWmJ7Uj5mTjfJc/Mssaop7gi4sIJFCqxoUIUgX2MCeM1wnSAevPqtdFZ0xAzI3K2oPNzvPvvd7xzhoz/55OLqEpgCUy0I5Tr1nggcGHHOMzODSRpQNTUhhy6mv/zzf/rjjz+9DpvQFxztWbNpKYxq4y+/ffPz33z3t39Pv33z8gBX0JDR3kpRVdKM6khggu7RhFyHIQ9DJgztZitF9+Oeka42l3/5z/7MADyEqsAMeXzY7w6Hw+XtiIiEBiZSxkGGAmLgWkZEHGmMtG9iG2OTQmTmv+yvH8Kzdyl+HS5+ZW+/+Obt3/7dL8IPPoQ2tG13ETaRsVO+0thlhkG+CRoIXYWIGMFLrovIcbIOAaE76GRVNBodiSAXGwYz31yE59dXF5vtZbtxVRNFcyauLdtqLTqYIeP4F1ai7dlxPFp+zkDufXA/3WYOFq4chxAXPlBx4AiNj47DWiqZf65nHk00CwfAWarBlf6+fFheRRECnD/VveY0nyvWayFowYHlWTM/cncPXhOCsZawrJn
/YpalSLWvAio6IxSV/Tj86je/+vbbbz98/brZNEheM176cdQiZcyuChQSEzqZi0rOBOaOFBSsiDpTMLiIm7/88Z9+evnqUiiMvrVw5TTc3j18+dWX//3f2/0uvNld9NQWrNFaTDioGpijmxdVUHMCcdBhLIBMIRa1XDzE7tnzFx9//GnTbQ/9+HC/r5XHzSTnnKW0pWBgIiAzkOIlmxcHiJvGARwtq6pqGPvCiZG2d35x3V1epctN2zTtuPvi3Wdv/uvf/OL1n/5o20FBCbfj/kFGTS+wbZ3idbe9uFDV2lmj7/uq7AKRuhVTLAUcylQwBv0whKZpQuo6RsTL7cU2tV1I6A4OjBQi1cJ4VkREkNP7YPd7APoM+r+Hkyyl4xYUYcBqAqoA5LPkM1n54QRia3t6RCQ/kumFD+CSD/AYBRdqvYDsBN94fA3Dakc4vhLN1yIiTnGUx/svT1HwwDVYfEKWJfXfwM0BqVb8q31frKiMJY+l1MhQR1SEcRzf3d/95je/SU1zcXUZQujHEZlA8eHhwUS1CAFQai2mpSKfkxtgqBngagjQUniWug74Q+peaIr9qDcPu5uvvvvNl7/7h9+Uz++2HK8gbDGglUMpCioBNSC4B1QDRXUTK1YQrem2OZdDP46577rth68/+eD1x1dXz7786utp9QkjBWdkoqRx2N+QihVTFdFsWkzyqFI8U6iVaAO5AXBtBJZ2d0RyGCwM4cOPLv68/fCzw6H/u+/+zX//C0XIh/7w7Tt99/DM48eXz19eXP1v/s//+w+fPxORxCGEcNjtY4xm1o9DMR3GsRZGR3AiIKam3TRNk1Kqwc9d04QQwFy08KyXGkw+VQzrQmx/3DjDhN97Gs0qr5+Sczyl/dXabavjDDjbi45V18EnUSZ8/wyewIGKXnNqps8ZvFM9LwCoNlA4gv56QHWTwfE1Jsln1nhq7ig4oIGhq1st5jGWUlQEPdAkwd/vHr765uv73d2PX/342bMrAx2HbO5iutvtqtbRhOgxQm0aQxACYUJX9UCBKQEyYAwIafMsbC5Gl9+9efjttze/+nz45lZvdmHffyCXrgY69uaBkFNADkammoM5gYoLqJmJuzmYFCpihnR59fzjjz/94PXHpvj1N98y13LQpqpSimg2M3fN0V1MNYNJ7Q5lJipZDiOnCCkRuzoQBDZDcnO3wX00LNSAfdCGh/3427//+mF/d1eGfe5FRIb+d+PwD7G52Hb/0y/+V3/x458BExE1nDDKtu2GYWi7UFR6jg2HPmQDx8DE/GKzWRLoausXMC9lqlBLq6pSzBxDyCJ/LNyfCSp/CCbgbOOvOMA+JTPArA+cSSuTQerRcZrqxxyf+3sQ4MmpwEpkmtaFyOAJhwjM6vwC/csdbK6q4qtIDzOriXj1sDmMKqPksZRaMZeh1okjFXl3d/v5l1+klK6urymGnHMx3e33+/5Qn1IzCmDFzZhZ2c1BwEPtt2UQ0BzDRvDm66+//u03h998I1/cxIfxCuKr2G0kZikHdSHQRCHVtu0CByPw4B7NxSS7iLuhHnoLnJ5dX7/68ONnz14U8dvbu7u7u013oaqiWbVUcltra5YaZMAGKmClVp8NCIrI6ARmWkBMXRRZOeyRLGdum42ifrPbMH+Y5XCXL5rLX373MI77eN1Ks3kr+e14R/bw+T/8Sv7qX6QpxA2iaAIYS9l2rQbuYhibVIMjnBCZnsEW50h1RiLzKtaGOZZmDQN/FOSsQegPVAC+Z5CDPek8fsqpzIDyiHvUz+9FgLPXO1OU18rxotQCrFjMPM6gf7lqiv2Y57okPaiDT8XvXAmW5gCYgrurG6kCQCnlYb978+7tx1cvmjapSs6jgj/s7m/v7i4uLhCRGnA3QDdTQ3RTABe3USUSo5P7xItI7ObNdzc//7z/1VcXe/2IN1ddkw5GB2nUQ2rSth2i7SE/6OhFWTWZJQN2B1N3I/dCqgRN6lJKl8+edV13/7B/+/YdAF1
fPX94eDAzQENEQlQtYz/knPuulvk3M7E8asmuMiUJKjqZuYEhOKSYUogjlDzmLsUA4Pf7bbv9ixcf/eDy480PX/8//vbf/n9+/Z93o0tDmthD0C7evrsh9ZhYizBgIm5D7AE1FyRKzBSpOvLVJ2MFEyGhuLha8eJztuQEeUQ1LquolpzPMq3+qPGPgP7Hl1Sgf/LMJ4+f3SR4oFkpBoATx96E8Q4AsNSwbZjNDM2RiBxI3dFMNDLPNhaoRNprjQU/+iOqKOYKQGgIyOTuYylTOhyYmFwVdkJqE8c0gN7buJM+g+bd7uOPPy790B+GZ5uLu3d3N5999YLbV6+vN9eNbfCz27dvHu4ptLi9vN/1zzfXOASLbeTLmLqipbh64B/djPcNv+v4KzZFfjV2F18X/Hf3zz6/e/EWbHiJDOb5btxHy5uUdnSH6AhIxVuw5FKlHVHJTCHFwJ3ksd8dTDHG+AYvf/qDH18/f3Fz9+729jakmJh24w1AJhAvgirkxg7JpDPbDl5EBi2DycA2IAzqooaIEaBRb5AToCOhWzbhg7axpX1RHZ45RRXe3XaJu13zv/1v/vqC5f/2N/8GDunDLqRXP/qr//Zf/e/+Z//rj2TLOT0MeyYnTjf3d2nTqRsAgWFUSJbQqZZaHVKBKjovccjgFOIJoJuDewD0o1VmsrTg4slaun2d/Af5NOjtTC5Yf5h+mvvVVt9orVcnXtsJO9T4sVWQxRrk17R4dlcsAWmTTH6eEbbIZwvZfp+sdiIDPfYPuM+FZJ7AQp89ynOumzFz7belWcyRVd1UfS7ObUpEw6HXUiKzme0fdu7+8uXL6+vrqTLPOI7jCMUjpymPiWOMjIgiUkouWhDx4LIvPgo5Ujnkfjdejfb84qK7MBhgxL2BFtGpdLG5o7kDIxiYg5rVjlfGzEPJOeeYGuDQNM2YRcQ+/fSji7ZzVVBLkRkc3FwUYG4wijilAcJU3VJhiqp6zCor/Jm5IhZT1Fo7Vh2DGTARkAMhAX795VeXjP/kRz/qA/ztF7/+9e23f/U/+vP/0//h/xibMTdMCQ1jJhhlLFYIU+3EiDWq17AmRyw7vuz1GRg8Pn4CrH+AUvvHjqO4hSuQgyPE/4Fs5H0STVirCOsPT772+kgV/WuFhQUBljDTpesg1kDReYY2S0pVDfc5KxJrdxAiwzlzx8yPS+0Vysk8tZ2KDsOwads/+eTTi9eXinTYD4fD4XA4GORtd3HRboggJg4huFsupUgBcGAaNyGbqAtJoP1At7Ap6WXTxa3YoAiSZVTDahI0E5hKqdQNV1WpZS7dXUwB0MYRUQwohSaE8Mmrl2Y27B4sj4kQHUyKqxAYuAEauCE6ONSWpeKu5uou5uZQ/+lk54ZaeBAdEY0c3cUJEllCheCAHpgQncA/uLr67ptvRxt+9sHrP/nhD/qE//x/8q//+sd/9jbdlBByGUqEEIOZg6IymgBirVhL7qgGxcAcOvv9YL0+vviFzk5+L1B+r+JwBn6wQoDFvomINFeBX0P/96PBexHgfRcv8dlnV/ppamadCqwO+tz4w9cKLkItTwlzyIOpIlAtDD+VkkREd5uTDYgI50i7AFBtcCkwM5c+hxA++OCDTz/9FC/Cm7ub3W7X933OWS23qYsxQG0eE8kQwIq7U+SQ4iHpkCWb6mFs9uU6p4te/WFPuaiKg6qJWAE0qh65GlWCtUGD1NLCZtb3feq6GIMU2w+HwOnFi8tXr16BSL/fD4eduxOCmYAWMkVwcHVTqiZfNDdXNzEVN1EVU3H1U7KqpoREjuRQEMysJFQiRQhYS5QAAIDK4eY2Bnq93RZKhZvnn3z8083z+//wd8O/fAXmxURMk1NqE6oV0eqjREQnVMBCNiqoa6snE1hzgDOK+xjQTxDjj0SAx5rrRGof6Ri4gsg/nOGcgfESenQuAi1wb3McPzwVKHHUfRHcpjbuNrMqYEIAclc3VTsSiXnSCq6ijKG2Xa8IUKk+Mhl
CdYFVplcTm82sTanlqLloKc+uri+7zfOr6z6Kqh4Oh3Ec3b1ShxBCCBxjCCG4q6g6KBED0VscRxQdCt6Xyz280qbb9/2X39qu5H7Yj30vg8mAImCKjm61O1Ntd26LTl9js8EpZwHDpm2228vN5uLm4b70BxszMwOa5VGtMNQCeYKuc91jVVd1LeDFtJhmUwGbukwDOIDU+C6AWhkQ3BRgZFSs8b0W2B3NTNxs2O03l1fR6N27+/3XhybLN4N88e/+Uy9/8ekP/uT1J69TCjlnQYyEpUjiQOgMYAAMUKDmp5+D9RkCPElx/yj5Z3FsnXmIl0CD43GfIO0xpp2A4h/26AXBzjDtnAOcfVgAnVZBoEuoc/26VFt391qnfnJGwCTkLG9UD7p5JYa8EqVwzvOfii/Mz0KohfIAHIIjE6kaAb568eLZ9jKllAndsBQtpRAApdjEFAO1KaYUiEBFRTMQgqprvm1UvSTVa8HnI1zsx/huP97e3d/sSyl7H7MVdIsuDmriSoo2JTwbqLuZqzumri1F9kNPlK4ur1++/KBN3WHXa9+zKjEhqIqCFFIBdEeDBfrRzE2sqGkByq4FTEwLTkXha3FsNy9QE8BJa1CggyAguoAQkkF0MLXiWT56/vzm/u7+9sYiPW+77SHLl9/ux+EX/91t8y/+2c+uPrh8/eJtfjf2JXQth0jqBMBqTkDgBoBW68mtq78+wQTOf4VzhDn78BgS3wegC3QuMXBneHg2k2USTz/odLyPO4XvnzquAjYnbqC25mKT7VIVYOnqCoRYI9hqwtd026r7OVT1awmlmoS5+VnIXBuM1tgVZo7EwYkjgzuptyHChp9dXF51WzfrAWqXA8ligdrIbQqhNkhxNckmqqqE7GiqcCAreXgl/IJSe39/94vPy7dj2Odxd1fQC6iAcpV8zNWK1QbOVDUcV6+2WkesvVORmbtu23VbL74/7AGFiWr2L6gkQEVQlamdB7qjmYO5FK9AT8WhmBcHrVkyVT+qtRgdnEiRmNi8OswFFJwJ3J3UzFDcpBwe7jvmdpsEPJvD/a6L6aNXr7/4h89v+v/ypntx+a//6lnT3JuhOmBwL+SIbiR1Z5ABk6PACXysIQ8rNzoVYt4HP+8nye+PejhVZ5f7LONoUsfj+X+gEnw2lns+YQWCRzjwhAi0usuUqFU5FhPZVLHZppi2I7JOtaUcDLwK/T7ru2RUm7cRkrpTYA4BzCIHY3X3rutkzGTepjY2dNlutk2rqjvzgIwGWgQpRuLIROgqWQpXmR0AqBYjYhpEx90ehmZbEnxz+/V/+nl3k19fPAMRZ0BSckOfWgiomXo1C+JUKwpc3Q18HHqm1G3bxC0RyZjdsBQhGDGwu5ecGT2EwEij1F4m5g4G7ggCLm6CXtAFPKMLuoArggMoVonEHYDAI6MQsoHVuGsGNK311gAUDGqRxouLy6Iy9AduUqI2H/Y3Wv6Jtb/797/6B4+vr55v/vxPYkO9yGjSxAQGaA7qZB6QCAycHvjc7HEGG7CSQP5xUtCT40k4/h4OQHMJaHiEM0+Ox7eqXwPNzclWWA5LRSGYa6bWIJw1JlTZs15FgaEGhyACTX2PVbWoJmJfMYoa6F+HThLTZA9l5BRifaoBmDkBdCExoImSekuBDCLSVbfdpIYdEDBSuHn77v72rktN7NqGQyJORGTehNDvdzc3N5uLy1dd6+55HKXkjccXvMmfvX3zn/6+uX24bq4O9w/KWBPVyQ1ERZUcYiBSghqRaqZuYiamCk4hOODucLjaxq7r+jwOu+Hy8mo47MwUydE1F8l5qJYCUCAiNRPTYmqg9UMmKmjFbVTJpl41KCYxZSJGUodBCoFE5sC8cSKDBJyICEBzKaqgrgB3D3ccQ0hxlNLfvbu6vm63Lf764V/+9Gd/87c//08ff/Cv//ovH25+Fz66dgyjOSLnXd9QeHb9bDwMDw8Pm81mSVqCRzT4SfX3LBD4+4EY4BhNfAb9KoJ
zqRFCFJEKMNXpVm3lyzRwdqT6SlCHR+bK9cHl/AXC62lPVIbDR1LamQKxPu5Vap9S45YFAweolPIw9IsEhUxhipcDWZWIO2oCiLWoG84/AToACVgAIoKE3HHsOLYUai1/MGFgxgAAwTEipRC6kJrAXvLu/iEP4+XlJSKIlpxLo/rMm+5Byxc3cHPgUYqOOWfsklYMNkdTNAdARURkrZZ8N3UXdCNEQAxs6kBo6GPJZZRRSiel27Y5Z0KCRABW22EN42EcRyc3QmMnZnEYS+nH3CdQVQETcEfQ6j2sfUIRlIANxJ1rbiA6jo7kyGqMRlAqS5pq1TgDAYIGMEJlFIJwUL05XKTN4WEP+0O43r7Lh8yA4Bec2i654G48FBFIQQJakTOIX/j8Y1nA3QH4SeHnferyGkHW91lHHz+ewDlGnbojnsS6Nbt4/NPyNay/Pwb9x/etBQOXMxfYLaY411wARzUX0xr4hbXo0qpAYuUPOFfJXQ6amasBU81XVXMwJ3N2aJEJsA3xIjabkNqQEMBBWUvk1MUkZDGEbdNetc2mSahyf3fz3bdfu/vLl89dTUxyHl9qfKXM724PX7zBhwHFBhsHKSzoQOTEbqQO7oKACGwEZu4oBlLRgsmQFEFMlQyYxEVdAE1RzeRu/9CEmELs8xA5hBAe9vvNxbaoiBVzVIRRbZAyatmPNQpCi9bqKVPAbQhIhK5ugLWRqDuouWRlBnFDn/wnteqmmCJwAKqli4Epkw1ePry6zmDcNXfD4e7wQC+uJReIdBW35bDb78eiGIi7riNO7x4eamXOJdsbZu5dgWRNsBYohkdk/gzsTgHxnMPUsZgf1zC6xsA1oD7JoE7R7BGneo9164gAT0L/I3Q/9ucgP9FnENEmUuHqYOBFxWbGoyK0ynZflhVW7KWOWgYLAcEcVF2E1MkwEifkTWy2oWlDbJARwBECxU1qNrEboSSO26btYtMwPdwfdjd3h/u7tm2xxjCaEuKP6FnY7/OXd/j2sKGGCA6SJZJZYeXWmAzZ0REVwIkCAhC6qgOaoxIgkhOKyGHsyYkiUeSaZm6gwGiE2Kam7cSUGFJqOrlQ91FlFMlg2XUoY1ZRh93QV5pRE4YU3MQBQM2IiKmWAEZGMgBGKGZAVNzNTQCWXjuCHhAEHF0NgQBHKd4fnnUvdug347CN0QHLYbjoGiZ697svt8gfXT0PSO9u70Yau+vruO3yblz3lYBZBp7y+OZ2kUeoeAw0q/EYH2judX1+/JRs4wx7k33lXI46AvH7EODMlHT263IwnJm9nkSANYziEl95qhUt6fu1s526qWlRpbnrE07dSI+F0deS1XIk0XR/VQVRdgghRuJaqLDlWKE/IoE5AEWgyClycLOGwybFNjBIASkN09Wma7ttGxOjR+LUNj+427z56uu7336XetvE7oDjg2XitoB2tQ6LUa26YREFrIHgAI7sU/1oEzBwzCq7/tCmhhN5rYgFOo79qJrBWoKCLug6ijo60zgMCkgxRYI8HkaRQx6BUGCyqhlMZL460aVkqn0PEAkxEhs4o2dEQ1REAiA39yk529AFjVxBAAgBXPqec36+eWmXnYv+xV/85bNPfvDVl79+8ewi9/uf2aa8u0u37+L11cfNNVxtDpbffffmojt20VqSdKcWJ6tOLUeo+F4EeAy7TyoSsGrbeEaOF85w7I5RDenvx4EnsWL9xPXxYyjEGaWHp3jC+sp1zsGk/s7KoripW656jOiiRlc/8VQMy73W7LVVIV8icpmVG3NCjBy6pm1TkiHXWjSROAJxbRCJFCjUCk2udtG2l9uLNqbcHzYppufXbUwUuEmBkSiEtOnafxjyb76zb+6bgqjWiw4A5gURIxCa194cQmiOBcEAnZgI2Fkd1bKoKPioZZTcdR0GHGUYhwEMDWkQC22rSH3JAv5w2ON+t9lsPv7BD8W06gDvdvfjt1/v3uVhyHTR1h1F97rLhODupRSoBUwBycDcAziDDxQ
CYSBknDqgVB+5uQcHNHF3UGDjUoqq/k7ftfF6JOligi/e/fz//v/umPdff/vl3//i7qtvXPTjn/7op//yf/jDf/XPLn/66c8+/Ojb3Xfuarb0sIL6dSaUNAP/IgLFNaicwcwZlK9BaP1hQYD1ORNiL/WmcAXxcI4Aa534fQhwBv2+iEDLvPEpU9SaA6z/niS1zPF6UGUbNzWtWrybWi1GP5OTUkotvlctSzVpP8bYNE0gcpfK+g0RzSOHJsY2JInQhtiEmJBrve3aQqe2BdimNmB4dnX9wbMXXZseVA/DsGk7NhhFrYg3FlpqYxo++2L8+qYdPSoc9kNRsSbspHRNrG/DRlxLhoILgJgTEfz/CfuzZkmSLD0QO4uqmplvd4uIjFxqr270iu4B0AAxIDkjFCEfKDJP88w/yYchKRBisJADDLE0lupGo7u6qrIyqzLWu7m7manqWfig5h43M6sxliEhN2/49evupnr0nO985/s4MBmZYcVqVrVmqQaehi6mNB3naT5erHabzebm+fV6va7jLLUm5Ne//urh/n5Yr69urg/jcaoFA/WrddevkB/VC1IAdzTTJqABDg4IEMMpzVjyIjMDUp8CRfaITYJbXY2IIqCrpUZ5VyMGNFfVnPNfvPuFf0Xa8//n//X//sVf/uW//Bf/rC/a76cXcfgkrZDx7i9+/S//yy/+6T/5n//g//J/+G//r/9nDx9O7GUJPvEa8m8BIRz46QH+9AHfiP3wBL789lo845DfWLjn2Pq1wvdbG+DbW+tv2gDf2Ie/GQU6P+6/sqH9yeOfvr4zsbHhhnRa+u0EaHdlnmcTJaK2AVJKwzCkEIw5IIZWGZubGeHCnUkxRgqRmJkJsI1SEmKXYt8PwzBE1N1ud3V1lSJqHu/evuHYmeo8z8CBhiE5IOLxzb0fykAxuo7jKIShS+U4JYpmsBgYnt6steQGoVmjA5gjqTXN2+qEXdeFFHW/V9VuSDc3V/2LF9vt9v7d++k4bofV/v7h9v17R3r//u7Vu9f744H6hCnUWpEpxDiDm5u56ckZtn2kKS2ztqpqIuqG5ugmoQNv8mQmpmoanBwDqABAQHI1dLLQBstq1rh/f6R1+k//+t/dvXrlbx423P1guLE3twlkrpk6GDbh7edf/Zt/8i/Gkv/gf/zv/Ym+vj+RJzmHwqdpDPGHpeL+zbbUb9wD31ij5w1w/v7TzQCB4Um2/O3l+hvX+tOF+hsff/7fkMEIKTByi9CN0m/mAIEZERnQzRuFA5HoJMpZTRExxIiBYVFtADkVc4iYQghGJaDkecXUM6LMJLnMx/271x7o/rAvrpeXl8+vnyNvuiBD2tD1s1qr58pqHVDPIRCb2TjPpessEHIkpGAYDcjB+/fPV/O9TP32+geXn31iNyHjX9r0T1+/BZQX2937UMrHl9sV/R+n4bv/7PWv/8svr5xmwld6zLuO3C3LtYZwMCC9jzAPJxdog53ahqshZZRMboE09I/T4e3tvgsJNd3t/Zjf5qo/+u3f+/4f/gEQfvHu1/7L+3B36O/n6SifiH0SP5IDvL77quuprvu9lVImBxnAdZ5+jtHMtapJ8aoRoWPumCjngWMfmJytKioyEjPe9jmCsBE5ABqSA5EDQIgVWQJ7hGpaqswli9tXwz6tmOoYoZM8x67/5f3dO51oAwAzAASAZPVTSetfPPqv/unkq9//h3/vldVXoRy2cI+Vh7Sf5gvuNsCdIlQVNI9cA1a3NYKZtuKYmYmAF0MxhpYOnOhNzJGIENTdT5MjAC0+EskHFioCccu9WqZhbuBAQG1EQURcdNmKrVl+noh3r6ZwQmf4SVq1qFWfOAfnkH0Sxvr6Nv3a3gKAZmLzBMBaDJ6ePtqWxzYHXwZUMCKKgSN069gPgefDfjzO7969e/3q1T5P1a3frnfbSzNrHSIxO+wfyZzV+8WSFNEc3Fo5uHym2NRIEKFBeOvri8vu4mqz2XRdxw4ppbt37511g+w
DuVqZ5oe7+9t3791dVUsppZRmkeJfr4L8NAbkauCexSBFRSwqE8Kkioir1QrU15vNq1evUkr/8H/3j773gx/cvnmbS4FyhLnIlGepRlIYmlqwgbuoaTWtBcqx5vflcFv2X4ITQQqBI4ehvSc2hP08BZkHjOt+WG3XZH6cjvM0YumEFvMt9vYZ8cniAJtApbqVJsxL4L6caMtgHQIA5JwpIIARoIADIiOGSgD2j/8f/88vbt98/0/+tl52ExTtaSp5s1rLcRYAUoCqgg7o1aGARVgi9CmKE4Ah0oLeWDsQ2mdrjT9yvvzradXTdOO8FNvz4Nc7Er+xafCNH/yb1vM3fiQ8ASZP3P2v/4yeTOMMF39pWEg9H1KFhS1v3uQ+mwR2c+9SJCMLzKA2Hcfb29v3b96+fft20tpv1qvVarPbpr5397mWMOcDSAQaOPYhNImyto+ZiIip6QKBkSEDKSEiblfrm5ubYXez22wH6tB86PrNZlVl4tZJM5mnaf8oj3f3HWBtljuioOYI5OBPyE7ePBDM2waANqbjlmt5rPkgklWMsAvp9vZWS/nD3/m9v/WjH+/H4+svvhxW6+7xQURKrZMJMgQGciCD4lpmKZInnw6kt17ekLzrfB+ViAhLg31RjdwIbBUjmkctQ8lrXg1djP1A1yu93ZupNlaRQyQGZLMTE1G1kSyqqYDZSYSnbYCcc0+BiMbpSJEQEakRQqndvqrxl3/91WOdh0+edfFlEXdI++O+v7y2KReQpnJqjIamhsUVsjNzCNSYAl/rlmEjwYN/wEu0TVM87S18ePDpevp1Q0pa7P/AJziZD3x7QfsTWfane+Ab++f8v2HJpxfGpcPCWFkCfBOzd3cDQEBCYl90rJbnxWUaBs3JAR1asvyhqO9izcQGdcr72/v7d++P+0Pbwevt5ubZs5ubm5RSzVXUp5y1GxgWWCCFmEJMyMsvCtyeVs0EidoLckqpu9he9pttFyIaBIIhpT/4/d+/ff86qe/roY6zSS7Hvk5zjwjm6NAcVJe4hdygqXNrAxEB0dzV3VSy6ih5LHkUMXAHnPJsJn/yd//eP/i7f3+8v3/961cdoB+n9DC5q4HMZIUBEMk0mIuVWvIxzyPUQ/L7oPcRHteRDejk/UYMwUNgTIR3t+/XqQ8xHMv4sL9npN3F5mK7G5qDjrsbIIISFFdRB0RwF3D1hsK5ovvZmf1UfQ2riIGnkiMmJyeH0PR6FQFKdbtYbx9ev//Lf/9nn0asN+tSi9X8fnqzDgmdmqQRMBF0GknAoGgIAVMCBqPlxhERYUAEpiU3sZOIM6A34TODUy3ri9nP12MvnEtweiIc/XT5/sYNcPYf+I0P+PYVwmkDfIPX+vRrM7M2owIeGq0TF79UOEGWbVUFRzi7qbV+WdcbBnY7Hsdpf7i/vcvTRMDrod9sNuv1NvYdIiOrIQoYBsaTrgoANC8GBgRuvstgCGCu4O0gZocU0mZYha5jIlfnwH3XfefjlwEk393LY87jFLEjEZsLnDCNGAI2r19faNtnjiEhNaElRCy5zCajS3YVcGRydAZ2rX//7//9v/+Hf6ccxl/+7Oer2K1Wqy+//PIKAFyAVN0zeCV3NTKvMqnLTDoyHFH3pdzX+aHOl48lEscYmQCstRmcwP+bj7+72263q0FFDg/3h8e9VePHCUMPjVXfhAMQBRxMkek0XWnSqLgIDs4UAC1xYIVaq4JzCAKI4IhgRI5uDs2nNDmIWKnTn//7/0jPdoN+dIi+utw9Hh54s1ODZnobuhQCIkVDd8eWUp5qAAJu6YEuClzLoargYI4c2y5BADzh4Q4A/mRZw5OM9JzttJphuV9/U/h39yfF9NM98G3HmvYrAkNbzx8ccOGU3NsZdwJ3gHYMKDiA8xNvbnRoDosNlmHAwGFp6CKqAVBIhAUpT/O0P4hL36fv/fi3VtcXm4sdEomqGLiIonbk7uZLIBNwZW94KS3DVOC2iLu4goFhJLYQQ0i
MRGCM0AUm8BioOGieQfr1areJXTBQVa3VRNydENt5DE/mAJ9GIADohr7WrFXEvIpMpg4QGf/oj/72H/z4d/cP91/+1S8Gjh3h/eu3HdCBcjEtWsXFBRSsuqnD5KWyFcYJ8bHU/Xis05FUfjANF9v1y5uPXrx4cbm7GFZ94kDg03Hv7m5CDr55UZ+XNvL258fbxrT2Ni6HuMxZmimgIzQ+qRM6uDXlMzWkwEyqWmsFJo5B0AkZEAzJwMxUzQvZuB/jup8fxsev3muXDtETp7KfsoVZTau4e9qu1l2KMQIRgqtIyZkwxBhjjBDQCQzRDUM4TS+2Zect5VxIwi0H8lPd1UK7PYHCEPF8PPrXE6ffuPoBAO18qiyj76cY/YFq8fSsCUtpu4jLNQLz11AnA29lOBI1HWp4gs4uR4c7t/INiIgSceQQiBFxNI1IPccRY3Du0/D8enf98vnLH33Pu2CRa3NpNgNwdYzNPAsAEQNS6/J8qFPaFLmbuJMaOUFRM0Lg9uvQvZmGNRYxSD0+3Od1jMP1wBFrJUDXZrtpH8SVEOm0BdyX99uISbReMSgZq9a5liwldcOw6n7vb/3O+LB//bMvsNZ1vzrc7Y/398+fP38tezOptUip4I5gBjAHnyI+Sj1IPeZ5mmYs9ZO43Vys/29/+Mcppb7vA7FImd8e8zyXmnfrdZnHeZ5dNca4ivESO+fw9sLneZ6mKZfSHMpbrDS3Vq60yLnEI3dEFBFIIYRQVXIpMQWOQVQUDJ0QrZqReQUhxS7EkDqMfPfqDV+s1y+u5vcP4DraoUV6IFwxhPXAQwfEJjpN0zwXAOj7vh/WMUYAiDGGoAAQYhug+po519MVfF73djJA+VqEpg/w/7lupq9rqT/dA/gt+tDT6+mPtL9P2qD+NXuLp9fyk4hL6v8t2gQCtOyfABGBAQNQRGZiBqQ+BcCOeAxp0w8vn7/4/m/94Pu/+1uv7t9PaDnnuVZ1TBxi6hoOC4gcKJyupUPcTiICbY0hN3JAN5jFMLT9TQ7uquq15hACI5Q8Pd7ezR0939xo7nTKodFIzd0MqaGIRERWl96nLsHDVRXMHw772ayanW3lL3a7j5+90Dy/+dVXHfOw6d6+fo3qHz27qXm+Dbm5MVJpFS1AcHWELk1V3h0f9/s9jPWmW//285c//PizQQBLlVKzqmo1UXLvCfZv38YYBw6GVEs9jhOiE9HH3705Ho+3Cli0aG3kuWYFQOoeCAEMENv4mYPTaSKCSKuJCMcARFVsGVp2AHc0IQMCuBiupiIY+c0Xr64+/eSjH9/8+uF936c811LrXAsQUt+vxEyBAkmWPObDfq+OdaVNGwYIVysECMzKAXHhUDi2hA2gnQXmH4oBO/1pI0dPTgA675PzG/mvnADfXrrnBzzdb+ejJpw7c/REUw4AzKw5MbVsvnGz+r73XNw9IDmCq1UtiUMXk6u2aB2IIzGfwnYfoquZapfSj3/4I46/tb3eGdBut5PpYKW0nNvMBDVySIQoYo6hX/V9SimieZUCgbPIJKVIbUzSLqbEARyz5J56VSW2LkaQ0vVx0ATut+/ektvj7fvDxZ3LmsWkFAIgwKpKTswMClKLmbUGHBO3Bww8MPM0TXn/wH3y7F2IKXIE+rt/9Mdf/vTnnTsp5DL1KZno4bh3973PbF6mY2+46zf742Ga6uWnH//nX/1iNGEpz9P6Zrf5zu7Zp5urncW7cnBfdqG6FpvNjIGh58mqFFlSAm7Me7n76g0RXQ3rTeofD/vH8Rg4rnbb93e33KWiUk1jiihqoinGx1KGYXD3/fFxHbuQoruHlCKaoNdGtQjIIbABuGMKh/tHjhD74e2r1y/uPnv57Nn7/QMDgruIGEJ/WoiBuHnLzHMZxzGP8+HhGPvu4uKiLZsYoxqkCDFG1VpKwdjBiY7fLhFpwlsN8GkHSKMLuLsjiggz930fY2xzAucw/+1131rjhGRu7qCyNGaJqOFHHzaDmTcyHMA
ygdriuX/9KZffdNo0sdEQAAHRCRgwEEdmBWAkRorEy0pakitGhBgCDBvfCSceVkMmnasxBUQEI0AgCiGEGCKJBcDulEG1kFDBSqnZZJymh8NeSx36/mqzSz2HFAMwOZ1SSUdQAOj7FInA1EVUyIuQe6KgdVZVdG/WHmdV3nbD6MkBt9ABUthcXnzx+iskkFK3w/CP/uRPMEvvxKbY4Bb0hZjmRoD7/eNmM5jYl493Nzc3G7V/9af/y9Xu+W9//PKTTz7bdEPdz/nxIIfxQBm2YbFBN1Sr1aq7Iyx4uaM72aKR0Ro31dEMXSNCH6KkDpBBdLfZOpJPZiqs7g7BgLX59wCenMyhKdYgimgld0ZiIiRwMDdXvR9HXq0opsNxfvj8yxff+c5vf/QRY9BS3ZA5YiAKiTEQMDX+d6nj4Xh3e4+8J6IYu4fd/rPvfGJmzIQMHJiZEJEDLsmEAzgQYPvDSOaGDqZabalZvfFPT3MwLfy3Fexfz6M+JCNPsKTlADlZ6505rU//1d3DYpvjp96WLRj/edWjATRWghm4L694Ac6JAFMIzNxWf2jVFlFoTSwHcQCnwByHVSLkSNQH15k0M1EgUgdGJOJAHEKi4zx0/RC7PiZEFDNxLa6HeSyu94fHh7s7Uk8cUojrrvcYIxAbCpB4s8AwdwuBREuejrXMxkFLNRAyLyKuhg4M6IgO0LgPH3juSwEE7UOftORamJkji+kPP/vud1+8/Omf/UUyA1VQQwcnVMTsXlVjtYRxP839btNth5+9eXO833/6/NN/+Md/NwpStXo8lqpdiND3GPir6X2LdojopxvMxmfuFwCgI7kDESJuDdUV3ZG5i8kA5io559Vmow4zzuhADhEJkCIxgSIi0SIvYraQT3OtHgiRPRASUyudAF/vH9YXuzoe9zXvb+fXX73+3b/zx+P+WF2F3AGb2aaZaxE0QHOrVqb58fGx5aEhdlJtvV4D4jAMacleKjE4Nfn6D9yz89USGzlJ7Z71d098KBeRM0/7KXPuGxughbCnjOP24FrreQM8LXGD+Bk2+ubTkQP5Esbx9CciNS4aAwFBIArEjESMDMiN3nOqXNFBYdnETJRSil0wci+gVcyMgJmRMCAiAaNDEO+HsE19DMEQKlh1Ha0+lGku+X7/MM7Tths2w2o7rFapKxyDOSsILgoh5OJoFGgq0zgeiGi73mxWPY9otbR+BTO30TRb2nwfwOOl4EFsoQeZ3r+5211evL+/23T9Dz/+7M3nX+a7x9QNJGamTqDgVfUIZdYMs6QYs3ohLCa/fP/2sl/9n/67/x7vRzxOepiie+hjCfzoZT/lPNdSiplxbG49BNCgvMZ/bGe9ITobEJGdZhsiIcUQMGqt4zhiDBwTclNUJzULxClEsHpeHI5QbTF9bec5GrKaAWILxkR7kFLGx8OhWw+GMM6TmeWchdGaQJhpKWU6jpGDxci1Sqk5lzIVVTegbiARfXw8rFardrQ29KSatKJzwc8b/I7oRNjyn2Yu0XZpq4zd6US2exr7zxvgScRfFjQjneHXpyH/Gw87lwGhnuim5N98xqeHRURqpHxGZKR45i9DU7CCVqvy0gQ5NVYRAgWxD6sKT5E152xVACByWBiR7iayIlpzaDimSFH0o5Qxz3uZHo6Ph2nfB7642F7sNn1gVoXUAbfFYuoiZgEcwAAt50lVLy62V59+/OLZ8/51NFVGavZn1dARxI2ZHbjFnsV0jWg5EtWwC93Qj+NYpvn55bUcpuPt/YYTTAXUAcEJCtSj5QcZjzIHd1Ffb7avHm5/+erXP/j+9//O3/r9cpxuv/jyAmJPTExjzfelPEKdXcmQm/gXLj0+O42eICI4NYgTEZERGau7glc3M2EnRSimUy06HddIIcU2vF+/nu+e7+wChyNEigXdDEQMnbhxCwFt1R9UD1L7/oqlPjzsH+7369X2oUzIRG7VrBSZ59KFDB2I5FKKVpFaczF1MPNj13GKu93mpKq
LCupm7goNdEcA+JACOSCYE2BoDCJAE5UqCsDct1fesv92LJzhIPjW1JgBGvO5UP6Q88CCjcLpm217hHPZ22St/MlmaNOPp7ldamrj/bLQmRflEmqPX3LoJxugvTgOHZgzOAGqtl8MqqpFtTWPkZAZtOWgdtmttqmPSFprMc2uj/N4Nx+N8WE6itab68sXL15cbncJmOQM9eIi5ADqoIYwzvM0HQ3t6urZ8PLlxfaCX1ctNXK3RAIDdSMHdgcMJ/Os0zlgbmYu+u72sLu8fvXrL7/3ne9+9+Unt1+9XlE0zUEd0ZGwqh40P9TpsU5HKV3s3P3+9ZuHh4cfP//sv/nh76yUfvXLz6+26+k43U6HogVS8C44OYHPpbb70ZZsrYqIJ+wLERtZFE7QOcwgjk1KUdhICTSgMk61BNfEiTmBe65STLFkTNiSfsSFsI6IzO35HdzB3EAVPTi6+8xu7t7Hin6cp59//vmf//mff/yD7yITMxsSqZiZlqpVlDghB+aUUpcG8+K5ilgt6qIi1rK7EBlOJNuG8i0JmTue8MPWSG3xVB3A3ETNTLt45sA1FA5PNNVzRPcTRuTujb7+lDrRHsbM+iQofDgB/LQ/WisL/UMl0Uj8kZqjDkQOKcQOGBH5dAFAG4VcypyWchHRqVPW5mYCR3ZzVXdQUxPB824Dal5OjMTIl6tNSh0aTHk+1ny0+v74+PbwYIHevXvXUfjex5+uVit0sCoJuaqoBXqC4TbvN9ECAOv1+ub58/7Zs4EGZgOiZtxtZk0TBJvrJixsk1Mz0VyXmzfmKcb48uUnf+tHP8SqD/kNBirHqe97ABDVo8735Xgr0xFVCEayvD/ev3r7/Rcf/+9/74/r/fiLX/yX7Xb7MI+jzEeauQt9HwBgejyM+0MdNqcNiS1P7bquW/cxRsIA7qUUzVlEGkeQXSiwEwoYmSAxpZjWwzhN2cQUuxBT1/XmRWoxBQoNZ2MgdqQWo2JERIbzhCuDgYqZal0jEndxXd2M8P7x4d/8u3/7WzI/++xj5A6YgKmdJFqqInniGONmtb28lFWROYsTbrfby8vL9XrNFNuBv7iDozdqw7eRnHPq4V/vCSzV0QksaqEBvpX8LBrKZlZlGb36OpG7/dKnAGv77SdlOAD6OiOvbYMPRYrDMrwCS6QPIbRK19DauzpvAKKTR4xDUUEzDNiOhROXadnKrcJ2cgIiRCIeuj6E2Aj30zQdND/s9+8f7x+Ph9vb24vNdvx0rrWO2Uyg61aqakaA7Pi0n2sxxmG9ur6+fv78JlxddZL6XnG1hv0HPOSU8nsLPADQEv/28YtIKeXm5ubdm7f/4E/+JIX4xedfdhTKNG+6npoDtmnOeZzGyXLuAEJ40Prll5//6Prl3/29P4TH6fYXXw6G5HD3+KDrIKvuUKZX7+8oyzbGq83610RtKZqZIjOn1cXFxcV1S3JFRCcy06LipgSeXBKTMzqAuBEyxRChr9Mx1yKmyLTq0oA0l1zmfC4xGZkdCZA5NpMvwHbIB0Y0URVRkbjqySEAmujl9RU80hdffKGJ/96zKw5IfVpwQjURIUR0auIlq9Wq77GvCkwXFxcvnr/cXmz6vj8ttVMr2Jc98I1k+3xHnrbD2rpfHLxV22ZonqdnG98PyY9Z2zDt+eE0zXyuhs+bgU85EjQ2aOvUtdVMiI29GIgIMDSMmLiVtlg8du1hjYS+NBCIz16BH9pyiGgIW2NA9qoV1FIoSI/z/Jjo3VRhHaV4cO+J6sM+Af/ws+/2iQ91utN63+F9GF49lC8P5f5tvv3ilrOsN/Ym3v+X6avN5W53fXmx6lYoj3bo1FaCKwAzyQEASCfG4Xn/W5uLZ58Mj3jlXfgo/csfxf/2PxVkMg5zlXnWosXMAuBA0UpFpxCCmk0izqHf9fe38g/+6B99snv2+V//5Zq61Nuv37/mm6sHE1qt3h2nYx/uanysOKyvXr1/9/n7n/cxfffHP7zPh7fv74Z1nA9
5Hh+6rrudp8NoGiMMNyPVQ1aa8C/Ce5vLYNQVu4rDH/34dz66unn16tWx5AJW3TwQ9zykIedccvYAhjVCSshkbKOjGhpvpOMJ+j6uRk/l+Hxz8YP189vD239ZHnaXz8NU7TDuNlc9pemYLcLAXQ4gBO6qJgwagxPB//CKuq7rus4QatG529bLkDHpT36++u5HV9//ztHlIBkQC0EMa1QAK8OKNMbDPDHZ9vry6tmuuwycXNfVIwExOUJlBJAAqgIGDEv0BDQgKFLURE1dNM/zPE2qykieay4CAK31aaJKEmOcjuPi4ITtKKcQExFhXLfIVUo5N84Q8Xg8tsFDRHR1BV0COjqcldepbQMAJAghLBuAOdAC7BCdA/2H6+kO/vbl7kTUav5zgtF2sJgG4sTBirr70HcxdgDgiOpeRac8HQ6Hh4eH+/v72Hd5zK/fvMnj9MsvP7+8uf7OD7772fe/t+vhZrVdd71XKbUik5jsj3maZwrcdR0GROWIKXaUhq6UQyMMuxoDNulFJtK5tgzQzIAxhNCkcBMHRhrHg6shgJimvmu6I6qaSxFiYHL1cRwPhwNU/fjjl32I8zxLzoECBB61TLkWAo1c0XOZx2kucwW1kqbrzW58d7fC9Pu/87vbYfPll18CQKm1NrEgIGIGACYKISCbNVjQYSkYEYEAmAyBmTFww23WKe4uL4Y6uWoIIQ2MiNUNA3sgw5PQKRiDJ+bEFMB9IiNsQ1IExIg1kIG/u7t9+fLK3c+Dr9RgPXIDV4R14tB3HnF7fXlxedkPQ8OjzEya6CsgAjjxOWcHAGi0FDWRJqOjJ88IM1FHo9NEWFtg9IQUtFCbAFTkPDbQcc8htDnlljWptfFRk5zHaWrJS4wREEE1NKCTYZEeYCQCJIAUYoM12+pvSD+eUNXzTvjGEfZ0SywXoeOJTN3gLKsqEhmlWgqBHcbjMRlv17suxCq1Ooh5LmWa5uNhmsdpmqbOgwEC4H487sfjq9t37/cPr+9vP3txqZ99t7t51pmBKgLsZX473d/VyQm7oUciIMdI3Ie07lrOs1AnEAMSIBDgrBIADUBd0ZljEJOa87BaRw77/d5cGb3WOgyDOjhSriXXMjECobrtHx8e9g9rCj/86NMElI+j1uooFMOxlINpXaXM/pjn/ThLFlN31TWm8f3ji+319z76JEDIYzbH29tb6mIBE3A0ihGJCIEjweQTIbpKcKc2GonYUuzqVt06gCL1YOMQtpvNZjuvNFenGLtuLhVMKEVxm00KtE6wIUKzP02EIgYMjhaJnWJFnU0Os745vL+275svZQQApBi71qhHcKZA3YqB+rS62K63G8DT/S7VABqHmplrtpbPwJkCJNpmxO3MmXZwdVVDRBNthYo3kf2vM3nOeI4/Ea8+owgt1WnVMzMvFioAXdctifqSAhFFJCZuiH4gQofEgRBbb6ut/lMBjudj5Temcd+4DCE8+X6rL60WB2so0/Fxf7x7+OzZx88ursi8GBbzqi5itaqrMYVhWJf9zCF0ses4iJS5lte37yat03zTxGZfbLbRvcz5/eHu9fTwKNmGGEIwcEET1BCMVzGkBKauwKaLNKlaGwxrxhWmRgjES+H10fVFF3l/N7X3q6r90O+PRwOcrVa3qq7EuZS7x/ti9fu7q482l/P+UXIhDod5pOB7kNLHzP5Qp9txPJYcOKaU0ONKakjDjz797keX1+9//VpziTFmEzSsbhUMnRQ8UqODe0VAtWLOJrQsqmhuHqhMea5llaK5TXmcU0ohPttcvD28FbeaKI8ZiCPDWOukVRwEPbgHOtWjBhMjg7NpUHS0UeudzPeWx5ophQpiADFGIkocyAHYm08NMFKK3TCkELH1lQFVwdXRzRyQGNSKafsY7URSMNEmYIFL1w+annEjvNgJDmpcNWa24MyNDOwmH2B+YkKgNqmLiMBEGCITaTAzBXdCJ1RVJ2zKYuQQ2ifLSKH1Ypfxb+RTDdBKWj4Lwv2
m/Odp7P/G9x3NkBbNG1c0bXr2gBoJpeT9w6NXu9zudpuL+XAsAAqsQGrohkSh73vcwlFRbJpLXZDswEhBzF/f3kmjBn/3e9uu3x8Pbx8fD5aPllfrDgMLGqHNVgiA1oFjcAExJcDgaAZq7m3qoo3agIE5izf60+VuQ+CulZEAzBAcWd2KWAF3wix1NpvrfMzHruu/e/WCah3vHgmcUjwci1efA9aIjyZ3ed5rcUaK7IRuuPPuD37ndxPxu1dvGPFYy9u7293N1aHMBa2CgVsVi0ABkABr14yWnBwZndENHZiwi3Uei4qgJ2ZUq7XO8/j85cWB7/NxnBRyrRhTtfpY5xnNFzlK4HZf3dVsjgHdAIxMtcIo5a6MdzLTbug366pS1OLQEwG4TuMh9VytdWiRILbV4qJtoLyRDlUURNUBES0QEWmVc45uIvM8mxmc+Bq1VsmlqYVC35vZskMQIcb2YkspcIrCCyB5Lk4bnxfAwYGQKTBAVIld6ldDrbXWqmYulZlDREZEbk2u1tYFbJVx0yRrq78BQUsD71tZ/vlM+PYJ0KZYlsMOzFzQFcAYIJe6f3ys03y9ubzaXQUkE58QCgBQaISTSKGLPXY00WjEAoJEMXJiBqZiXkTK3X2MXZeGy+3qOB7GUqwj5EhddERDULTJZgCkTWzCRO5OAJGZAAqgoRn7XEsFcwQ3UzMMPGzWm76r08xIBKK6lC6KUN1ETQmy1MqkbuZ2eX3x0XY33z1aqaHvCnohmqTqEPdSH0oetXCKRIyGqopOv/W9H3cUy5xzLloqhBB3m9t5LAwKXsHdndXIITkSoPTB0fBEXXQiYHQiCBECG6MBABOn6IRF6prT9Wr7bi6lFEX0gBm1MFR3J4y83GICaiSFnFDVXM1NxHSU8ij5qOX68sXm6uLoMtUc+s5UXbSUKg3CcjfHoArmbTsRoJubSM1F5lxzATUA6LdrZm4Qn4igQyllHMeWvbR0o1WxC1hkCNqcpBwJgcAV1A2dTNzBgQnAqeUryMS81AcnKZelvmUmopgSh9D2jzblh8iMiIGYiQPQwvNxWECh0+o/m2J8gyd3/uJvqoYNwZpelotDq1Sacpk/3N6+fv1m060//uijGOPj/d6qHMGQmRINSCvRVSmlCBpSDHGILbFr/cJq6lKdSQhe3d85wGboU4C46vrVKsRAHKoUJkewfZmzYU2gp64QIwE2XBAKKiFYzeaOgd1MRFahu7q6IreH/UMiquJmFmI8ziMygVFVqSK5Fg89RY6Br64vItLdw2PoohEeSvYuzZoh8DzP4zSp+9D3AUlEEvJ2s76+fvbl578kQA7p9uExrYa02Xz55te46oTAEEGd0Fm9AjK0s9vRjQHVnV2rM7qqqTEqQjFhhY6YGcHBxvmiX03pOM0P3iVlzK6WONel3CTEipSQDUNAPIA5uJqKSFWZrYwuhWF1ubt4flMe7qzM7t4ogIQuJszs0KQGciq9JmHzWqqIlDnP4ziPkxZtLaYsmjio6jiOLTWvteYxMzOYGxGRurvWpsvC8zy3JB7MAdBEXc0RUkruJqpQQePSz1WzEJK729I9PE9GIDXmJUAIqeuQKDTANBDgsu5b4G+R/uuL/ullT9b6N7bBt1c/LIQWMLCW9rgrgjG4S71/uH379s32Oz+8urg0hfv7hyGmES1wCqHrOWxUc66SBcwvLi6O4Tj6IZc6S2mMhhC6g2bkeChl+urXXQw3V5c3wzU5xBAVMFfBQGJmZU5mE4lHdm9KcBgAl3aYO8Rg7oIeA5u4gccYLzZbF50Ph+1262qqGmOqKhADEUqZq0qpNa2GFEKIvB5WOuuY56vL7aT1sUw+JKvkSGLg1Vi9Zw/gWvxqs/rk5uWb2/fQxffv3hvC+uby9vB4++r9+ubyIU+V0BEQAdUZPJqzY5XS3EnJidGDIVlFIBGpAIw+l4LqQ+wgITnm+/2w3gwxAYADCPhsEkKfTUihuZ0wcReSRXamo1VzU5d
qtUrNroWgMqTNan2xuy8TTnskB8MUI5gbSWvMWpVSSs1zDhQ4llK01uk4jvsxT7OrMXNkLqWklNx9nmdETCm5WpMzbRVBIyQTICEF4nnMH2pcXwRVVNVXIKpFpXnch1xjV5m5X38YpkFsdE0ERGhuImZEFPuOU2xQaaBG8GvJIACYQxuVe8KkMzPEBfkJ4YPIs31dQfJcAzzdG4qOS0NOEBQRiJrllf7iZz8Psfv+979PgadpcoQ3727Hq3XHxCmlmNbST7Gb+x4Acs5iVXTgTkspZa4mTVuqE3UzYbdjnkYpYTNcf/xRllKO03ozWCn3D/c9EzDttUwmpcwMzu4lZ1cz8HmeVJhT7CkVqfM8E0Af09D3x1/dDinm8ehoiP5w2AOhA+RSWitqte7nkqXkly+e9yne3x6G7eZ+PEyg3sfZvSJOh+P4eNh1Kwfxw7zut9cXV6uuT5M8uIrIiH5/fy/3t6HvZvdfffkFrLrKAIjMPHDsOTSXQQNzcxRjxBTQm5RTla6PbMHMHCF2HTqWOSNyjIRV+9SZmbiFFDWXLobGhEhE4dSvBLUZfFyxmY3HY7uB2VWZs8oPfvvHFeyjTz7KVuZx6pgOh8ebq2sJYGZVhQN2qWdmyWWqY8PuylRcDcy1aJYZAAwWQbQYIzO3os5V51xqrUS0Wq2aRmDj5qSUcs7axpUYkchFtdajKgUupQh6v16JSJEauiRuZtbmB1rLrP06f6Lw3qho7fwJZ7nzb4T8M6WHTipCyIRMT1Ogc/g/L/qn/+vLRKW6m1l1F8ITcQJ0HA/dkLaby241OLKjioM4HHNxDslgwDCkbjv0JU+usupT1VRMvRTyQCKEIaRUfSHQmqMAFrOpymGaYxcQYZ5KLTMwKeNjmUer2EdU4WoByHNFNwPPHDCwgDaAP3apYa46F61FRcwFCM+9dUGt1tBnVRUOYT10qRsSwOy1go5mM3tFzqDFrNaaMMp+vlxtri7WWHWl+Cz1IYSf7d+WUnKZJrQxz1pG3gybm6vbfKzgCsBq6jZjjQpo3nNnqmAQoBog8/Jpk4OjI7ie7UQBiSEhM1Cbr8iNWGSmqmhOjnBihipidVf0LA2ZbMZQKOgOzn26fHbDXcDI/WqYpqPZ0oUFJg7ce3D3ELgB/6SO5k1CvnF0vYqJoAO26UEDd7QIjUlJgKVUN8PWNxBpnzQzx26NTs3EWFSJmvwC5alg4FIrphA4dUPvTF3XTcejqYGCoKAhNaUuXHzbwYGc2gNAARxCw0oXXN8/YDi0eHU0BU6Ek4FFq6/x684F5y+eLv3lC0R1VRFwBQRzMRdVub+/22xWz18+71edoiuYAgrgMRcMvJKBun5InW82KtXAspaCpowhsyOoakBKXciTeFNvQVDHqn6Ypvu7w7OPrgh4Hqc5T10XqteH6XDQrJE9MjiSwoJw+QI+mFoFa5aVBO5F5sPRtVo7Z8+sQYSlY+5N60qZaRjWoe885+w6WR3NCqMiZrMiWquuQucVL9L6athQ0N12O6R0e3t7YCum3HcXF7tVrfcPD2MtMBdEdrBFl8WUXScxNK/ooAbuwTlGT55CCIgAKqjKy20wBAxIkbjj4ERtXN21mqq7WxF2DIDBgMAdvJoJIoCJNC4xmQmGgIAGsN3tbp4/M3AiTEMCAHWLMSBBCBRCQMTGOkNzFzVRLVVz0VxAQVWl1Cb7HhI3tA1Mg2HLvMnJxQOHxClyIiAAZA7MLFVdzMylVLETaYXQsrhaVYkhxJj6YdUi1+HxsXXH2vJrr+1MlwCARqk4c0s/9Asaf2hxyCE0PLGMARycAe30pPh1GtO31/3Ty5oSmxayikSO7dOYHw4P6932+tmNM021NNktC1S8inqtVWpepbSJXem7qv0kc0GFAJyCgmsVdiAiXhzpofkyiek45bu7+91us1onFZRqsYNc6v1xVHMNWAkIvHGjXLUxx2vV6moMBKiqniXDXOKcXBs+Qsit1cqAdvJ1JUB2IIM
+MCNO+72jzSaTqoZkCNUhq6m6oj67uOwo1Lk+u7zY7Nbv3r37+Ref/zSVPM8dhY+un11dXl6nRPvH++nYDV0TaW8DG6dJZZNpRnNwj8CduTp0hCEEk8qmjoS4jOwReGtrukPkkFLyqaB5BCLzgQI5MAI3+25cHNyCo5kjh2rOjUEX4fr5s369OpSRCChwU4YKKYYQanCOoSGbWlRMVExF6pzLnCUX10btdFBb6i4zdxM1V7OTZrhWafQqFwWGyJxSCl26vXtsK6ZMs5SKACmlGCOeZspiG4FgrtacQhHMVVWQ2pAWsIN5mz9p67bWqlUAAInDmV+6LGJYpiLNzE9aCeRtTgMXStP/1jzy11KgE42JQIOjuRXJU5lUdbddd5tVNvFirAGBNDAqOZqqSi5qwARDDHPiLlGHUYNjDGamc9Fc3DUiWFv9tpxapZTHx8f9/T7yjoFCSKYwljqLYmBhE4bQjNvN6PT6RQQYlmRR1Et1EtRmF2nwLQYiAJgoE6263gl7DIg4lqJE1a2iK7mAZRUxAyCptnm+42rMtLm5enf37i9++fNKtnl+PcxFxnl/PDDRxWa761ciok2tDZxhEYppAUhMoWUvLmfv+aCBCTsAbDoCHIItTcz2dkKXUow0Ajp0IbIjU2zdJAITVEU3AkcMwgZIgbXWSAzoCn5zcxNjtOJmFREpMhkAE5DHPkZiAGqKKS3jJwevAqJe3d0ZOBIiMQIRwIdRXW4DvGQANZeWm+Vp5hj6vmfmoFaLmmopUuaapxkAXFyTU2AyJApMkSiYQanqRQJxBVRbXkn742p5mu3Ue26wUoyRkRZdLPh6Em8I4EZI7kaAbQ+7t0Gwr2U45wPhN+c/AP7BIKN9PFrKPI5j6uNqs6aAuRZTZgMyUnBgYmYkl1rUjCMHgC6GVddl1ALkCKmPXZ+KGoF1iAZQHBwX8xJXyHN99+49uO52mxT74nNVhRiNwTuCiYEdeAmW1up7IGpqNu5mxmrsEIEW315fsKLWZDFEq6Kl9imloS9SImIAToZHOgkcuFeVWVUcYuAUE4SwXvU3VxcwpJ//1Ve/Lo8//vGPmeTi6jKK1/sDiWHVDvmqWx1qBkBwrG3Q2RzUVY1iQAYwd7Gq7pZFlUNZdV1ibsVlxMAIbIuQh5lF4pRS498nYnYECuxACApKRIXMCZQsChlAoDD7RMCEaqZXuyszizEWr1W1CbA2HlIIAYladGdGdEbRJU92IIBlWBCgSeaIZFADNTdrpOCm8EWwaCwYeJQYOZztI1R1gZhqJYAM4O5sMRCeJ0+W6oywQ2YgMQBtwwGmIKCuRVogPjOIGIg6DOek35sQYqOvPc12TsMBZ/fm80L/m8L/13bIBwbsotxRa53meb3bDps1EIkaUTD1uRQTs+AYmJlJ3NWQIRAG4tW6P4KgZHcPkVIKUCOZA5G4kwo5REbkQESu+u7NWyslRt5dbXJG5JhWq1yzkwGT0+l9naZ/QgizLdOJ0OACBzdbiLOEQIRP3lf79npYXVxdPj4+VFVAY8BqIm6te1OqzLm4Qcddv1pPJV/udpr4529+/S4fabd+U48/+fwXuzjc9JuL2G8poUAXYz90Vh8QFF3Itbpqs9lWlHCSKScGNXFDVSQy9XaCdSFGClwUFim2Bn6HZrcI5oER1AMS+9KTcQJmlACAOMSoqiGlI1IiVnJ02G63j4+POIRc6zSPIcUO0cARsVHN2lgFM5P5bFVyaXPkZoZOhIpIhoaIWpvbpqs7NqfQReWMmsZsw+xDSIEiGBKinLRMCJZBGYHKMRKgtTlYkRbOQoxQ5Bzmz5M0bRjgzBItpeCZVn1euOfVjM2+94kd2Deu35jr/1ce5q5P9oaqapW8uXzWdbEZUQCSgpdapagRAAAzszu4EjIBBoIh9iGP51cYQrCgZO5UwUwApQXy1oF32O+PYHr9/PriagcAHEMHMNXclqadB7/atJUapSBFpjoDQDBb6P6lpoROCMjAbG3
ux9wJUkoxhHU/bNebeZ7K4WBgYHISCgZ1q6ZFha05RsJ+PF6V3eOr/Z//9V/QKnbb7b/9/BfPb57PD/u7/R3FTddtqChE6YZ+iAlNgBAEgMDNqJ11pTBzxEgI2LgAzMx0hqSZmZCIHEUdlvq+kV7BvAnmuhlToKbShoiI3riahF1MgpJaoh2jAJDJMAz39/dr3s15nqZpM6ziaQBwrhVbquxIiK0j1kDGVg27my+tKFp4b9aoPYt0aVv/OWci8mVghxqbzZ9S3JqcxImLmkIIIQgt6SsGXmbWWmkhCuTGKrY8w9JoE625lJyJSHsx0WCu4A5LMoZwEo0KIYA76WKUgIBkDiae+lZKEDqGsOjJmfqCpSyh0U/Cj7HM43gQL7xa7SPe7ssj4Gr3/Gb7oqtd8KgQsnpWnYNMLhdhYJNcsxgE5hmQKIqh57L1XXG7m/ejWOiHGWE/jrjpQMGrR2k4KLATOTtscPb71w/kdP3RRWD+6u2rZ8P29keEf5U3t/P1wWqGB/T7zu+iq877+Vjd4noYqbzLR4fwbDtM0yMQCkkTaqwBVNEVymH+9OrjPvXlsaJ1TnZXsoQNHaaBk6KOY64iEWmIsY2Xv9heJqD3+2O6uH7j8y/ev4Hnl6vD9PH11eP7O0F5O92xwQ+/+/2Kbk2BoXEhEDzEAj6a7IQMUYNlIonshBHInDqnCxqe+7CaLYF3CCGymYY6Q+Jc9jTgne27IU11ChzXZIPRCjg59MomKmbqOoX5YrV6eP/+puskwOvjw7Pf/V7ZQh7v8/Fx4+mae7ZUV920Cl84Ps9cTYuZM6aUPFFmnhwQqM2OgztHVoRSdV+mqBGAAKh5goDawj6IAQCIIaUYIlSdoSgRBQwKisgCkM2IKKTYdZ2EoO5a1HNNEC6fD6FL5T6rLXowoMrBA2Itdc55t9u1DDMYhNgxM1Z9fHcbfmOM/3av9ynSvwROXIYJ/QkXaGmmwTJTa2bhVB6om4o3Ll4r25cOmrmZt4G2Vu0tW44QF8RgOQEbfT+cCNtNNxccHNSbjAAaGJqZo9daZc50h84WBu63/W6zGTbDRx+tDr96VHgUcAjsaApiqqC2Dl2uZTpOQHhzeXU57ByBidRNRLN7MZX2aTi1l1RUilpVAUJAzCZz4gw2oo2uEzWgrcUYmKaJAOfjKDIDSlQAA2EYpUDkIkpqq5CKyHa9Vjes+uFeuDeaTYODDFzPSSgCgNHCV3eGRaJvyZQcEJfxqJZSm5m6GrAZKGAAAEBHBMKz0m2711Uk9d3F5TW38SsDE1dVq7VWEgFBH706oTNz46Qh+TDAVsZy30Tn/QSbtF/dPBmWl7FIthAyUGQAIGeAxVVgqTAdWtbu7q13xjHC6SQUU3ef5/l4PAaptVYAaSygxrag02zkPM+LN9dpKrL1yL6ZAj1d6N/o6fopvzczc1koNacxl1O/bAn8pxlNQW+2b6iqbdAREds7cVqgVfWl9+FPusuMBCcfETRnpBjjquvn3AxGreMAsVMyI6wu3nzrW+EPYFIOx4N6qZq7Vfzs4rNn18/X29WLupb1r4SgBuQumKm0tSW2jf0kXo5TXPcvbp7dDFuba2JWMRHJIllFlpSBq6q4qdRDLrOLBTbGqnLoeTZ/FNmTTm4EyOgFfEVUF0KgB4N1CmvkPBXvw6S1X/f58Yhgmz7NJjeroariidmLBs27m2xh8jWMzkHJAyKgQ0chnoZ7AhATBMAWpJjZiGqtjNRSc0SrINFZHZQQms4fIgKRoYhxDMA0lbG7Gq6fPwNCJwSBJWtXrbVK8QI0ljl0KZy7SUgxRu97HwY1JFsI/e2nAzoqqeripNWWWyOdqfvCykYzM3GlZWxGq7Td27oZ7TUsGwnczHLOx+Mx1aqqnJag2TL+M8J5PB7PkfRcvobWJvhGsKevr/tvbINGn0Fb5NQVwd2aJRvSh3Fb8cZaMjQDQgQqppPkooKI3KUmPwvY/BFdzBS87ekqUk3b4EHTWmO
iSBxSxJPEF1VKyInCbKJMrg3RtMbjbJS9WquPpqD7+10A2q0v+i4pY2HwiNoFRygFXCFxwGIr54ixUKLQDamLMaoYVUImUoImImRu4OTNjcXVdK4lg3okAc+uE9PR9RHqwWpFC8QBPbvNJn0I3EXYg7tvutWmTI8P7+L2stSpS50GBAXqYgXlLmEOoLV1H5osIbiGs5LmoueOAMZAETlx6JgScUKO4BGo+VsxMofoFEsR5qi+3EczU4MKQBgMrIEchsxEbk4hOOM85b7fbS8viisCRyYADBaQQkFvGhYiEroUTwoJZ+2qruukmrX7jQjgbM7GQGgNF240eSBYRJkWJRgV0+Ai5i6IOE8qIoGYI1FgOs22u5/CbjPsqrUtvIDBnwwW44mk0zCl83s/f/E3ngCn73zTALDpWMAHSZb2eiyX6SwV0RplyzEK1nD6RpxU1dZqVVVzD8xG/GGnBZYquZaQAyUIjngCBUIIwBRjbG8+zqHUWlXAx4ouhaVhBG5uviRCos6Yx+nxfl+mYkXGUv/67riXaZfYO6i1ihswRYg4Caol5N1qbTFaqQVzcGhYBwZOkEQRlk8fOAYHENNsMoGC0mxS0MWtqtSiouaETmSAxfRYteMwoz2W6fb4eHO9Wa3X/HA7z/M05aEr4hJxGdEBPHdalIjIqSEtS8QBt/b5Q5Mi9gTQhzBw7IgScTALSIGIgBMGirEQ5WlmZnEHJ2RyRTFQNFH1gO5o1OxOuFputsQCGPohblaVEcAMiVKM2BFRRUevLo4AoRETY0QmBzvjkm3GUUQAUWFJClSwVmlVcjudkBUbKw7RAJxAqoNXIgWA4zG3mritQGkb7JRTnfg6HwwEzKBWneciokRExO6gag1VWXJJW4QkRGw5Ac7gZrvOy/3bR4E9HbMHa7xZEZnneQGhU4NwFgBUzaxJdJpWVT/JeqkqATg5ETmauql7QESiosLzjAA9x3iaw2Qi5hCJbb0hxCGkqeRcy6wVXUsGRHBXdUED9OZ8AURUcnm8vXt4/xCID+N+erPvyyF1lGdv7m2ByBE8sGR1cIwBA+ecU3XCUGutLeASMrA1nW9bToUqZgjVdJrH0UUIaDbKSrmSa0iBHRFA3SClo9UBykTyaFMPJV2urvHF+4d7AMS5tuXrZgFpnmc/a6ERNCaMmXnzHDq5AKEbI3XICWlg7ok65A4xMgfANtlHuIjkTNN0vq1EzR4X1VDRTc0Y3cEc0Kqpm1UgghSwi57YE9daeyYKIXFHgMWUXMEKnEiTkQMFNjIPpqc43dYrEjk18wh6kk237gu6n3otgItQF5orAIiBz/McY0wpnQuYpTY4uSeFEID9LPhjJnmayjS3xgWcYKMQop39Xn2REf/fPgH8298RNTyLnriYziXXmrPWgI62WGjAWfe9qTi5VdPTLlreAH2dcNr4IUNKtZS5ZACgDoiXkN+4e0y07noG7GLqp2maprvyYESMjWX1YUCuHX+J06TH/cP+4fYO0d+9e/dG5pdYn3VYTaBUNgiMsyoEmlTnUuZqMfYXELGq5pJxVvcCVhErLLSLFrpm0WqqCFnlIU8zGnaRx8pzjUV6dmLkQOjganHVHe7vN7BdP7sK5Xh33A8BNpcXF6EHtZLzphvIvIxzTyHvj37SCSViD6Zo7bf7Mq2EAMZIESgiJocVUofYEUWkYB6IGJnAzQGRVOU4jZZIxPzJbB8QVjAEdDc1UPLoERCqKThgFyvBUQqFlVMACtCU0BwRnGRRzmwOI36CLFvNepbyNLMmrLbsk3gCCb0J91IzmXR3dHTCFsIXnUt3MJdSW8usaU+1UgERXURPCigmKlbVDeBUT7qfD5mm2nau78+yWbXW5WSBr1+/Mfa3S1UQ0duYiVnj+cy1uDu6VCBsQI5rU3ozwnNnFE4sRWwNmiaLC8inhh+o8irWWovUIFyjMlJwFjcXI6IuxEhMqWMiVEM1FuJG+POmiNXEXxv4TSEEApjneTqOXR8
Pj4+/3liXcA5cVFKRCGAMVcUjVcA7qceat0ZXYRcFbD/NfTHwAlYdBNvEBgECBbZS1c0Jq+khT4WgHyJWAzESDw7BkJvlmjm6Ph72z/X51dXV2/vbN7fvcp6uXjz7o5ffQcT3b99t17tSyrvpzYa4ToVSgBauCAFQCSo2kY/QZIzbRxcdonsPkAASYHIPgHyy6mmCa+iuADlX4WhIEBZZAwdwA3AyMAVTB3XoAruTZAUiCGEGfSxTb6nrOkc2QwFnaHmIIlhLkpcRXiWtUnKex6nkXOZ5SXUCg3N1LyKJe9BF2o1PPlpmS7LdHEFbfdn6BcwoIjJLCKEb+q7rGqLYigFXPWf2C8vN6pn/3I7NVjrbSUUCnxB/zIzOk2NnWr+dRjDtdC3Jm4iInDlAZla1ZMlVRNyq1SJSRKqWqkXbcmdarI5Mz9vR3Wspx/2+lgLmhMjMXde1UetSCgbuug4IpWllmNVaAbG9BndnpI7Ctl/d7C4/ef58SJ2ppBA2q3Ub+G87d7fbHY/7aZp++L3vXV1d3b57b6J30W51Ptbc9/31egtTmR4Paehn8n2C1zb98njHu9Vqs54fD1ehV7eplqnkLFVOsQ1xUXFrQAQxx5SAcJymo8soZXZxQm8PUw3Ex/3h+dU1FsFp/vHzl791/bJ/zG9/+ou//l//3c/+1Z/eWPyt6xdv/uKnn+yuByeS2nGspXDkLOXheGieaNoGalVNNXJYpdQxdYirEDZdt+GUkNkN1E73q034oxF+/Omnx+OxSO2GXlVLVamm2mTIwZGbsnERLSLAVFwPZf7oO9+ZTGa0wqAMwt5+6EQdcS21i2ndDwQwHcdxHNvi8VNftU0/zvMMAH3fK7QusCGCu4lUMwshtK0CgO5Qq5RS0SBxzDm7e4wxhADmi+4VNNfkNna85EW4sJgDIjdzHPclpxIxVWeOXTeEkJZsjAJzDOcwfw785wPhhDPA028uhxmgQJNZRSdEbl7q5GTiiIaO3hDgmCKYorYKRgDNatVSQaB6KZwBo9siPW1qFVo5j00GoWXYCICIzSq4YW18muRch9X1ZqdXdY/HvU1C4mRAlPMySXR1dXFzc9P1sUkRQwrcI4CVaXbRVepmpAfTuzKOZAe20lEN6IShucAxtUOXiJcGOS3Cj8wcyKNZa2sAiLt/JXMBUUYiYymImIhiIEbSKSOmq81l38WL3j571nUh8mzr3fbZ1TN7nDYQrvvh3cPjthsMnBwcQJuqBbgiAEOn1JQKAiCpB8BVDKsQVxgSYUIgA0JvSnCIGPpuLHWvkwcaNusxNmodNt1xAwoILftQR1tGmMARxR36YJEtBonkkR3ZhUxPGJQrmBAtsRaRmBdbg6Y07O7iJiJKwMCESIERNQQySi0Amxu5E+GpLfRBV4EA0eH64rIV0MuOUtNTGbkoqSC6KpyE35iohezFXzWlFGLf9eM4Nll8dJBSWwLWpy4sS/+JCTZ+i+X29OKlNHB3X2yfyMERiFozSkzNLCCc4NT4IZdSdVcV0SpQnY2EC2BWZAKgtgGW0p6B2bHJNGhrCCiHhjlQJCZuql4b7eniJmG6C/sId57dxFVdwaXmzcXm5cfPP/7sY2JzNyLcOH96efWd57R5e2t1cnchP+T5mOcjyFxyKeV4PE642bQjsDkuwrIW3T98OIwYkBmRmQOzFysm77jCgIgUiNGVDQix2YzrVBDmrfGzMDzrjXlzudu9L+PV1ZW6/fQXn3fI69T/8vGLzfNnBcDQml+QuimYgbXg06aig2MAS8gD8y726xgH5uBIvnSXGiskdOnh/bu7PBLzaneRa16EQwCRG/4PTsvdFHV2ZHRFEPCwWuOQPAWLbF1wCIDsYiraJNxcDZldVEpFCGdARtpUgbuqVlMApFPGz5EQudULItJoMvTBVtTbnLb7Iqbb972ICGCWxdWcEQ2WxMTV7JQ/uxmYyQnifEqpWIrMD4Wy1VpjjH1TafVvLPMTVHz68sMChmU
RYJs8UldxsWUKyQCgmpIDEzhSQCLEnLPCUhAvzlxiqAbqIWBEAgcXMdE2vdzALEHEk1cCuBuSy9JvZmUiwrDIjfbQpXUawtDzECG6QB7z4fBIBMBw8+zqe9/7zuXV7v7x1snSwDcj/Pji5rc/Wm++tEe6n9FKIy3nUuoMU9b9eHh3d9CBpUyOxbW6iWkTTG8gBDclEUdCOEvIuFopuW765rBk4MEDuQGwuIsZIIhIGacQh21aBZA4ycX1BRG9fvvuzZtXm92WY7gbH7b8rLV4zKXN+Dk6tBavA3MMhAhK6l3CTeq2fbcKISHSyeTKAQwJEEfV28PjXgps+9R3VudaNYWIRg3LwJZ9O5qpgQdERwL24qXfrDAFYRQiC+SOKlC9JSJqKmjm5DlnPx6DdRxPZm1EqlV84dE0hxsgNIRh6JogCiKFkIhARGDp3TgYnS1h2nTY4XDwE6ACAMDk7mcGEZzg/HOwrlWZOcauHcuIaAbzXNxRxE7ZFIo0U/sFDlue7GkKhE/A/KcnA5gikbopgnpt8d5ODjboTuAOhAgtCVYRp+YhBKBmKlqrVUkY+9St+sEoFq0nk23UE0jaxj7QHDgAg4O3TiQiOkACNzIiGighc8ceILGHmuXVV2/u7t6PeUb01Wa1uVgbSJWcEm02m8up+450q6PInGfQif3gcshT2R9jrtfIKxpWgmRe0R+0qLuB+5kM24zEm5MfBSdK4D3HPqbIjOYBWv2vhsAxJQ5kUKsUotCFg9Qv7t+sUvzB9YtesRxGSfzu/u6Xb77KDB9/+qISCAAPXZ2n6o0zXF0NCRahGofgGADZMRGtYrfpu03fJcIIxEDnHoKoGvjj/bifxhLQA0n1uRYH6mIHAGcb+TM05+DqRkjiNlXZ9J0QzCZVyuBDQVdUba0OdWhQg3kpRQAiWAd9O6D6vj+OcwvAXdcZIafYIvGw6s3VXJkhcOSA8+xaFMndcNEoMySAZiZzOOQzkkMn0eXWAbBTPvw0RT8/sl1womCcIVQ82ZCZWSklnKuHD8fAU1rok7PhfHyAuwEqWdusCm5oKq0kMAYkA0EAgOBOFJzQ1RctJCleK1TpV6uh61apEwyzQucgteYnv9TMFLGiLqU5kaNDPXUxVSUEIho6ZgxI5qGD7eV0VbebDQAcj8fVpuv7HoiO81Ghrnfr9Xb197rPdm+n4y9eTa/f7mu+8/zVeP/23buu+k7p2e7SV9hv19t+VbPtjxmZEAgMTpp4S7Ros3kU2AB69pUNQ+6Ped4VVjOthg6x09gFB80qFEPoYNI6jXdhHIYXV9ebdQ3pV4e7N7dvXo8PlzdXYbf+6vYdDB10sU57bYLy0lxfOUAjuzMTBcSIMETerda7Yb3phuSUgIIjEimAIYhKNnlzfzdJ8W5Qt1xL0zsyaw14aGOvTXITgdGlqpOrgM05UxcFoLiS1gqqzgpo4OjO4JHYCOnEzWhhm4hijLxa5ccDqHk0IHREioFCACaOxJFYkRtGDBxjpJNtipu7Ay97uIXghV6wMC0A9cQ94ydevWALw3fVDa0BUUXtBIAGpFJmcgBAF6VIfUwiUmv9cAKcVx4+oUafO8HLDnA/jVCZn5xVW0LpCxUIAEDMEEHMzWG97sxMq+Sc8zyDaUTkGLuYAjE6EOFqGDSlcjyOx9HTojqxPK17Wbj+wQGd7AxPLZ0B5RDdDKxKCvHq6uqTTz65vbsrWi6vL66fX3d9nKt3Qx8SUcDLN1P9/E3+4rXPOaPd1fmhzobwYnt5nfE72+eKUBJ1qZO+zLUayFNUoMXJ1jFapvIIA3IX2yEQvtddmFmmOecsRRGkoorpiIjBrYdjEZveTa/wsl8Ht8f9OM6j9GHz8fMHyz/99RdxM1ig7FqaEbgZmiN5RErEzYQzMEagdd9frNe79WbNIWRloEjcxpkqeil1Kvnu4UFjoMBHqUUlhIAhiEjC0Ih0DWBoydXyHhcyi/XDEFPCSBi4mgotRorLSgwE6Cz
ghNx1XepCSogNqKeu68jBxb2CAXAIFALG4K4hEPQRnVqjjJnCqkfMWrkCqurZscrMiCLiyVDjSZ7SYpA/6QEDLFN+DbR8mssAQM65QagNpGoTZ7XWD42wb0T68wnw4XzwD0fm16oDcrRT/Q7Y/kVVzaGZDzb8Nedcaw2EKcTEIcaI5iKCHFNKa4f9nFXV/YPe3cK1VlW187tF87NwnbuP4xS7oRnfxj5ebLYfv/z0OI5Z8tXN5cuXL1OH4/xI3NWK03z84l//J749bo66SolJq0LarG4uLj+x4WqvL7rNoeYHEVADQk9sVsSsUZKWz9pBXQER2VCxyTM3t0VG/HR744TzPN4+PjzM+1LVWR08q0AA7LgG/9X4+Ouf3/UIQ4gvVpep64ftxcXzm1dvXr8+3P74+W9jCgauLq0HDOYI0NT9l1ISMRF2MQ3DMHQxYqBihMTEbVofXQ1cROaSQxeBSVXMLHQJkMs8eeRTvPqwsHB5foYAFLhbDbHvuo6w65owrZ+CMQNSYGBEMAwcuq7ve0rRzMCQAqaUQE1YztB723taZiJKKZmqzFa1RE5937sYel3IAH4W41n8tc9lQCOctvGXbwSm9sU0Te29NPpMK7XbChyGgYiaDGMTiG44ni1TBuePA8Hdp3nqug5DUFvAJqRG0zNDUAJg8mUuyQGMidwM1FSUzAEoADrAqAUd0DBU3Niw69frfo0OQxrcsQ/rrhsEvRNThzLND6utgYpqdXVyZKAIhKA2BeCOEiExgprXagQ4E1idJpA5GsQxMuGN/MiudvH7m5fP5q3tA1/UZ8//7a8++ic//+in9z+7WVQjx5p7sz/gNfnA1aMB9WHvR+kwEMx5b6Cwws2BM8IEcnTNaJaAAZNrFA9zvbl6Nh8eV+t+lvJ86O/flp/D7Uery6v16kWAfh4SwOP88MXD2wnCOym3VicEC5AuIqc+phWU9ZzrH//h3x7f3f/lv//z74WPXvDFuqZ6n1e03tdczGMamBkDggHuYs2yMfpOt/khXXy2jy8zPU+dzBD6bmJ8J/k4hD2H9Y+++7/8i3/GW069rQKgYsgaq2BMzAECZ4CKxoAIzSrKGEhyjZvup4d3+ukV/fDjr+QYdR0fpmG9idGduPRcsARxJmQgfLFu0zOBYuKADtmnaZ4FVVE9ACd2UZ9mVuv7/m1AAkopWZZ5lpB6RLp/OAxdj+qaVQ0MG0GGDNxpUbpoo2JNyRodiYKZtBROVassyGaIS6/J1KoVZnYzKbJdb5sH99CtAcDEs1QADE+30TdOgKfXeReimYJrw2TDYnesrSY6cQQQzNUavx8KECzcawx4Hunvus4MFpTqhIURkWttqZprGylwDwiEAcnbGddUgb2lggLkjiAgFdRn88hJLTBfbHcxdY9zfqjFHivf3qaHh+sly3QAcDM0P59ki2XqOZwgMFGCJRNDaGh5A6qWeARAABBCAKLEQYm2q/Wbh72F1fby2eWLTTjOME+rdX/10fMvHt8936Qjw75Mop4woqKUsun73/69Pzg+PPzkP/6HLa+vri7nebrkG3EVk6q1mgBQYuxDIiJ0jIjrlHar9cVmveVVD8ERUtcpghPevHh+PN4/f/n8f/rn/1zc+EmAXA728xens/1rhGCmKoKIu8sLM+MUl2wUQNWLFBKIqgEWlX0/DyJzakNnlQiJYow1FzUTUzMlhyKCtXbrDRF1MSnUMmdq3nhhyUToJD7QAryCS5VzAbCUAad/Pb+w8wkWQlA7eelRbAUAIjYq0dfQpNMV2t2Er+X9y1oXVT+x31rvuk2tiKm4YWDA6IynXI0AkQjdrQE1ogruXp2R3L3x5Ppu1fc9OoQQVJf6ps1TpJSGYUAxIm+u6OIVyN0DRrZA5CAu6GBgYgEByIkRHF3QqooYSCSsHgBWq5US7e8f3h32Orq9e5f2d9/jLWRpR1xb6+TOCymG2jJx94a0ACIRKAAQurU90HS9TgkhAgB0MQlS5AAMl5u
dfvHq/fxql/3y2Yu+71UskV9th+fXN3vJ78f9XjnnSsDr1PebTm6ur9fbP/3Pf/kgtx8PH3ddx6tuLPkwT7OLoAMTx2V+iJnXDn3ong+bF9vdzebiArtOHKvHoZvnsVut70p5+elnf/rLn/3q1evVzWXP6O6uhk/iGpyF9gFOZl2nNCDisWbq4ouPPiqmsUstdLljrTUrkCIDUQxd3w/DIAmaU0vbAK4msYQQOEUDz1JrKbjMSzmYrFarZQNQydPsVQKSJzBRIuKAqAs/QtwaqfHp6j+vaTvNtbR31LKa1hpqhJ8UF3MAdw8htiZA6+ycfxYAQnE9h3l7shMAwE0Yl29iGzJQoSzV1NzRQwyEpxOq7ZAFDXjSml7CCjP3faLU931KqSmFnn4tIkLgMAzDZrOhwxEBrT2HmZs5ARlak3gHMhd1r6oI4AYRCRGM0UyLa6nm4lA0iZdZ94+Pj/vHQVOsZbRaI4AowjLMsGgBALbeKZw6X+6AjhEpII7UhqqQHVtrFq35ayACqGoIQU0jkiNc9KsfvPzO7ZvXr169ulqt1jfP3FTnSYuZFDger4w+6Z45+3g4JkmXm8v3F1d/9qf/4c2rr767+ayRoL778ff+8pc/P8yjBMJAKYXIoU26kPk1h4t+9fFm96xfbThGI0QzhmwSt9sHyXMgLfnf/eQn22fX2Y1OI1GAi+LleZ8DgOHSxmyoOJgL47HkeLm9efniAE4hqJuBl1JIrIO4DqlPw9D3w2o9DIMGDyFEDkTMxIYYupSqpNyFLmEMASGFFv0CM3fDAAB96gSp61MFCEDokNVO69vccOl+nEg6T8N/Kw9ao5fog5JcW2YxhAaTNMZbY6y0dvWpPv5QNCNiKLWeV//T04GI8DSmCEtrQ2utnouaOQITQK1M0ICq1gZXUVVFMwRo3D1KBOaIxJS6EFvlAWcf3HbauDWL4RjjQKyu6qamZGqEaG16w5puKoGqKS6eeKS2iNw7m2iptUgVyKCGU9Z5nEyqYqQh2bp7GAXFAODsstmCf/vbALUxwx0YEQ3c2wT96TNBbDiIuxOxmZdSNmkz5cIEodqK4ovr3f27t2/m2/Cqf5BRx9HGvA3p06tn67DukNkhEH387GIY1l3X/ecvfv7zX/1lAH7x7Ob17TtOHLvwqze/riAChBQCGLsl5J44EX/C/c2weT5sLyhiFjVRZCbe57zZrfeP+/7Z8//pf/3/ZlcInQOT1xaUqAHkrXY8ff609J9O/yFUhkx+cXWxvroYOUOIVWcSHWsFg23Hfd/vdherruv7vk8dnLQZbZk1wxBC6FK/Gvq82ooBwGq1Grq+cb0mJDBj5jbfZVXJEZjh5FdgZg56ntxKKbXVdepq2dNgbSfW5zmo8xMDPHiSyT/htn1tqYdZ63n1+5mwiUhLt+eUvpupadGKtToCMIEqqFgF5+YBhnh60e3Ia9Z6FNFEASCcxxoMwb0R9EJIeA696u6QmNQ0q5Jp+2zdodUYiAigBLT8jY5GBtzk85XA3LJUKYUEEHk/HZtrYnXny3V8cfn+l48XpwRmoXGcCBu+HAZNSdhBrW2DVnW01U+OjujmBMjMrpJz3mw2bZLGVFcUxAX7lGf+2ePbv96/nuqYAJ5BH3eb3//sB8/7zf2bd5I1XV6Moj/94vM/++WfrXHd9/2U56ub6+Fi+8uvfnV/3IeUCMEMgnhi2MV01W9WXf9973fD+ir1yZBFzFACurNE+vzt66vvfecnX37+y9ev6Wo71iptItONAFrKTjnrE0zPvb1xbxKIBJDRcUhXHz3nLnVdLIxWrRaQaQ7OHlfDanNxcdGnxMwh8dn5vB3RROQIyOSBOMVuswohbDabvu+X2DfNdc4LlBQCEap6E2xsB2tT9GnqCkTE/KEIOeUwH8Yaz9f5MW25nndI2zbufvKBdoAPTwUAoaqcnxdOBH1sIlC27CECJyIxrSoRoRnzOGGttZhyDCEEq2Jm6BAbToV0GtMmczc
TX94DmlvbIeDEzECE1uoHIqKecKputYApBQTApqHkACeAFBjQQZvQhlEoVUgUiRRNwRU9shfDx+lYtQjBbCVd7/rvwt3bx8uC0NKYdgLY0oJu0Ja1NqA5AmBVB6j+ITQwYBt5aFc1Vc3teRKxiQDSatW/+ORlWYe30/427yfA5Kjk//Sv/2wE/91Pvkc9Ty4/e/ern//qi788fL6G1cXV5Wq1yibPdldK/tc/+5mDBgoUQhfjOnS71N+k9fWwWffDd5SH2CcgNCOmEKIjTCbWxX0ZbR7/zU9+wkM/qmKfmsbtWbSnQQ5ykn9qf51AQG82PiVCGIarj58r43q7rl7c0dRlKgXYN5Zi7FerPsVWutlc2k0lX0TQVLWIOCJ3qSNOKXWrVeq6FpvXxIcWX2xZnaK11KIt3YWFSbGsXSYw/sZaP2co8sQb72l9nFISEZUPBhlnDu/p1D+lgu6hnoqDc6rj7vRE/dPM2oyjmIkZE4ZmRQiubibm+KSJhgsxpiHWeMLOzcyJTr09AACV0xJsfRUmb0PKAGTaNAmZGA0NXd2YvOXdRK4IjAgtYATUKmiVKmt0JfOAxjRnO5R5tloCjMZ2MdBLfrgIelA0AEc0b543eIKCWvhp4z6s7urobtyiJJA3S7pmVAgOIG4u2o6SyEGgBgpJfeAQQhCG0kVd0VhkmvIW6R//9E//1U9/ssa+eL2H2YFW66vvXz0bx3G96tYxWqDH8XD/eM9AVG0Vw82we7beXafVFXW70PUQniVmYhcDII4hxG4yG/NUTF7+8Af/9//5Hz/IPJqn9QXG0EXCWs+5QRtdhzaZdJpKwVNS5AjoUBniqusvtkqYViuaxA0bS1HRXbzpo4YQDA0C61GfFnsKXk1FpO97IEzqXdc1pXJERCJmn+fZXIydGAxBVUWrnYrCRaovcKNAl1n9SeA/5+7M3II6nPDDtm0+wP91mbpU1VLquZzAE+G07YFQpD593gXgM40xmi3tNJWlOOYY2GmuxaU0/8qqyqru3nUdOiySQU2/hEKIkSOBuTqVXKbD3cVqc7W7aOdbCKF1bUIIjmC1xhjZ4XD/cMzzKKXbrlfXF2qyW2/HeaLTNFRLWNvCnbwExmCkVms1j0gdq+Ld44OxG2BadTe7mxB2++N9/Whbvroz0z4GUte5DF0XUhprjn0nJkBAhu6uoozUpQQyWmM+I1OtuU1RxqTgGMOcp6mWYRju7u6eXV6JyEdxCGbjsJ685Gp3tVRy6jrtwjzLXMsr3zsA931/tanb9e27h77vLQAlvn24ffPu7dV2OwzDarVad8P1anMZh63xhYUNxRWGDfk85W7ojfjusKde4uVOSMJm/c//9F9/eX9bInbrdbWKgv1mZdPc951WmccpxhiIa51D32krqVqaRYCwGMfvJf/Bj/4gg/ZDp25IvBnWD2/fkUOesquvup6IxI0DAUMI4XA4TNMEQG1um4i22626DUyq3iwq6HT+FKtd1x1zOR6P4zTlPFWpMUbx6rYooBBzChERc85Mqa3+xlxwXwrONmBwRoHwNFq43++bcgSfPAHaDjlXCyfuE7TgHuRDjffEUAaxkWbhyXWSgXBHAET/MBfvJlqhLHkCYgrxvCNbgS4i8zS5KK63MXa+MOjAzBzREM1RxEqRw+Fwf3//cDzMoluHtF6bgweQ7BTA48LHpFYCA2rk0JIyY0EoLsUVVOdaikpMzOt1WCehUC56+PQ6/8WjzkrgvWM7ghcmdvuUwRwc3dEXbgmFgO6BiDCQg54IK7mWNPQ2zbf7h09fvFytVofDYTOstmPuQhfX14HQH83nOhtTiHXOzMjUg5ky2jrqEG2I6+sUiICgWkW3PoRViJvVpotxE9IFhJ3RTnCtvgHryfaHu8++//3H6fju4X79/NlE8NXDXf/s+t/8xZ9/cf9WIoVVF1JUVzOxkuk05+TueMoT/Kz/576wCdyAgcxp3UMKBcxKthGmWmpVMIDqXsyqSK2gAKE
16S2F4O455wYjMcV2a9Ub/biN2fA5F+eABnqcj/f393mcAGC16hnD6AetzVD4bMLrjvZ0tqt9sUyffX1S5Wka/2HFPgF17AMy+QHuR8Rwfq6ny/1pYfE0YXL36dRyi082wIfpTwCIEUJkZmjDkGruXmt9fHx00avNpS+obWxJnkEzajgNMeWSp3kec3YdimoBBYMsroBMwWMbJAKFBmGDGpklRTCo7mAqXqzUx8dHySX12+1mE/puqhbWHD+7zptfV6ugzkix+es2EQY1c9PmFIcO6ASuCCcTiSX5CUt7EotpJPTI7x/uXz5/sd5uXn/xq92wvq5kSKs4xM7iYFuht9N+yjJXDCFQCkgkbB5DAI5qzElLPZYDG3iuveHQ9RdxGFK3DmnL3Q7iBnBw7A07g7hZ39/fa4rddr2vRTd98fRXn//8P/ziryoz9xFjEFdyZ4JgmjggYJtlDqdBIv/6qJODG0Lj1qdNj10spnmeqstYMiKzE6hbMZlFi7pZwOAIakq0+HIDLAvdCE0956xu7th1ncV4RtibP7aqzmVyl/V603N0szIH10VtbUlU2ijxqQG1SAScFltjdJ7fQlt+TylA9kRSzc9s5dOTn69lA5xPkHM2f06Kzr+g/eI8TsycUmoiLcgLhkNErcHgT0ry82+ttT4+Hso4X2+v8uUVGMTmWuVLW+nMF8qixHG12gQw5jhN834aMcQ09GxIEGJqrg3qrqAezYJ4yOIihAWpGBSvOh9HrtZf8TpGBRylxIibm51dr2fJdiiJKFBzbdUnDgquBAAOBARQmymMqAFwa5iYEaIyhaHbl9lTGA+HNw933715kVIys8uQjjnPkzyLNKxunvPwucVfT3eZuSKYAiAgJgaC4qRZXTRXFyOgBNRR2HLaUtyErg9xQ2HlNDANDm3Ub3119eVXr2jdh+vLrGUv85ePt/+///KTmYCHyF0qWr1YCjRwn4iYP3Co8ETjkVP315cZDyNHBEPEuBkgYgUzqRU955rQ0NCqg6jmUsZZSvUhwiJKJWdDiuUJ1d2hmdu5o5TaNMpbZpJrAYCU0mq1IvPtescG4/HIjBq4Kfiaa1UJGGKMTWLwKdB5XpDfyFCWDUDne/k1bBNO8Ob5at8PCx78Lf37p49rq7PR6PaHfdd1hoCBO2wkb1RVRrQnJvLnF3RqVdjhcDg+7O+v7qdnL1KIzRAKkBVECUqt4zge9uPxeCQKl1fbClDd9/f7X71+U1Surq/7vt/u1tv1atV3HTEhA6oB96ZdFZslQuUgkYwM1sQc6CL0m9SP4AUUui4OXffxzcN0lDmvxDp3VSBzbDafAEZg7o3nSeCC3nVd8WyliVq4mzmjufeb9fvXX623FzSkr96+vl5vd5cXdS6ZS7XKalvmXeq3Q+y2vobwfhofNY86o9PA1HsMSl5s3QWI3AdaYegp9BSGkHruBkoBuENOyM3ogRwB8O543D67fjePXnL37PIvfvmzf/mT/3hED6se+yRgzeMlEQ8IbEYUDU/GVE9WQ/vKCei8GQDcPa0GjEEb3dcJzR0ABbyYFqljmY7H6XDkiNxTsZIPRURCCESh3WtEZCQT0VqlWp2zVXHR9XqdUhItANCmsQhxGDqtxnPrc2NIsckEes2I2HVdLXJWeVg2MPN5lX4b8j+/x3NG1L5PTxbn02dYXMSe7qS26J923Wqt8zzP81xKmXMGxJiSqmoIkXkZKEI8z+nCabDLzNywVs05j+N4e3v7arV+cfP8+vIqxmgGgG4I6tAKgHmexykD08XFlTPvp3l/fHd/+3A4jg/3hyGl3cXmo5vrF89uri4v+hQIkCMMVTsEQ2dwwcYUtLTeBYPdMHTOFdRCiKGPod9856N3d7fzw6GoVzcRY2RsDVFwd1dsIotqbujUrQYCrAsHfBF3MbMYuBIYYbde3X/15vbh/gcvPplyfaPH0HHCGDCISG/wyfpyt7386dtfd2V8nMnRtmHYxq4HRHDoYiJeU+y
BO8MEFJCCc4JAQNGJmAjZDBUQHKainEJ/dXlk+PlXv/qrX305MeKqywTsSughcBewJ2ZV9gox0Bn4U/M2RxuCnbKgsxVQG3RebdYcQ8Xz8iJvI/SlUrEy58PD4eH+HoInTxUEq7TVch4+VFUAk1JNtTVGpVY3a+Wlg4tUsdrCq5kRQdf3UzeZGaLgCbYPQRyi6gd0Hk6EnwYBPe0NPw3f5/BvT5gN5x9/CgQhYjifEU8z/qepf1NTGcfxeDzmnDnFxfn0/FN0ficUiM9JGxACgonmnKdpOhwOr1+/9qKX2wsTffHiBSI3dN+I55KnaToej1lqDB2HmIaVcxoOEwLVWst9OQLOh2MQvxjW3cX1rtuEQH20ldvKklHuIUPIe6pgMnc1iHeGNheBwsgtGbj69OVXX/xq/OqdZG1ILjpR4KpmaOoq5FVFpEYkqBA9AUAgprikzqOLI4zztNluFZ1iiH33sH/MV89Cl97Z2BGt3JMJGgWgVQjcxU9efLTW/DCPUuZkvgXeQOgZH1GHkFYUk0J0jw6RiZkjM7Q5C3RFQ0Z1IIC0XR1yCSHcjY//7s/+7J2WzfPr1/sHiqG69oAhUARCM1QPhGbWWBTgIKdMGhFPVV1j6bRJfweAbrPhFKUWdyBv5kJmuZAhA6KhlDKOY5oSdqhkMbCVD+EWoEl52jiOAAAKtZRZVUsldRSTgRo9IXWhCfgxUtd1wzDUOdeaW9bQltCUc3Ofb2y5M6RzXm9nYZXzol084s8p7Tkxe3L5E1pUIIfzn/8/X3/6K8uW5QlCa9rbzNz9DHd678U8ZEbOWYWyVDRSV1dDC+iiUUsgIbUEYvgAQkiA1BJf4N/iKxICIZWKrqyOrMoxsiIjIiPiDfe9e8/k7ma2915r8WGZ2fH7IhvXU+jEuccns73XXsNviEu+HC4LrN+8qZba5lLGaZ6mfZcvhw5EgXTA2tpWl5iZI4RHX+zm7QQop+nNq1dZUt/3X9sADw8PDw8PZW5NvZSS+iGl1Oc+pUTAgGCtzONcxsmaCvPQ9X3OQmXverCEnBlypeQ4eavF7snc5zodTwUbd71mLVaHly/SYcAkxhaN8JhERCkYPPRqqqbgRoJtHL00UhPODOgAYkDkx/P56s3L9w+PCWl/dTjfn07j+eXuqiYpY5mmaW/5Ou2y5LPrOI6vXr7sSPflfD6e9OnYzTogHjhPMOdQADNj9Uw8SE45O5LS4ttFGAocbmYJu0++862//fzX/+bP/2zSin26e3pyoTR0rVR3RSRUt1qQuZPe1IId7xfEcEJ0N/hw6UT3r991LOJltkCNhY1atRxMfGIwt7ooMAtRznkcx6INgHLOcQLMczmfz2Gd1Eqd57mVyoucSI4lnocBnWyurkZEF2yVxdmumWkpA+UNaHM5rcKL4RdcVp7+wfr+Wk17ufrjBQUbExIaYUMRCKdLIvJqrbU6z+P5aRzPWssud4d+4Nubw27f5YweYExB9Kp1nmozcBZPYoSTVnMn5JdGT+8f6t38gl69kY/P709f/OSr3Pq+3ymbklWHWq2O2oqxDuPpF93VvqV5yuej1sf+Sa+1c6kP547oRX/Q++lv/6u/TE94/Y9uuKd+GIWodQDYZGoviyaHJ6Jfd/Qw2EQzqvepP3gCTzIc/u/d4+/8p/9k/ukv8ufTrWfL/Aso+mbX0NODvj7y4aSl+OnQf7qzv61P/5m/eaynh/k84lTIJ1clIOQX1JUv7q8RQPCccHqR/wLur3v44wep1dGlT0nIvD7tHPeIfH9POUHuxoEegI5pLK5PBN86vJlPZzK4OdwkhXqcSfmaDm1WR6gOxbQAUCf9ftcf+v/3+MvP/t1Pvnj31cyqXZpaQdfBmU5Tj5RdBpIBpcuHHhmcbuuJGE/kJ9CJaerkPBqeJjF6eXVTprlB618c3s/HkvD7v/NDv34zMuWboZi3psnZ5olaxeP5th9eHIbDvr+6vRoOQ2Vo3vxk2CBjTsBUHdxxMjv
OabRWZ6vOiIMBWp2P9/dvT/Sdq5iLTVbBsJPEJM38/vjkTLurw3yea63okESY2b2QIIGaFkvGkknYCPvDlTc9nU5geHO44lVPsj/sp2kqtYKQSLdMi2nVvwIUBBEO9hYgCK6577I5zEMkY9PGig0XQA5kzv2QcxYR1yXLR/ug3N76P1UbIB4nPZ6P53lC8v1+L84551rrz3/+84qtgrWgWxmDCjiNZeKhM2+ttbqqJg27XY/JTsVAq3pTf3x6ejoeWeT2cPDSdBzbVGspVVWhNdau63rQhg7Mi6EAAIB1LML88Tc+8Z89tcnD9MlrG+tsc52cmLCyT+wmBCgnLU0QIdU6nVopYG4ArUbBFyIFXts8TXOrrTV58V2f5nmcplb7TH0/uFlrbS4lhThCALABSN2svn///sXNbcdyHkdueHU1uOGv7r68uroyR+rTcPXyetcfS/n1l2/f/uKrn8xvH8+n2VoeehQhNKoeSE/yJdknMEZCMARYhbs5D10TwtOZkgx56KVHpFomTMkztwo8dFevX0wiSkAIjIYoKk2Zw4omGO5d12WWOI8czBWWxrw5AYJ79EtCRs0VABEsLCLd3fV0UtWu6xJLouTu4zRN5zmaVCJiyRBD0S56MLDl7uQQ3AMnZE62AtDMQiz4eS4WFf5l4L+cJ1w2flZCjLmhOUoYOdkqDtdaA6CUUqIsQevMeSl3VqQQuBNg+DQ9v6HaovU1zsfxWL1Kn1+9eek3tt8PYf99LuNxPk+luCFDl6hjTCOd+rZ397nVcR6b1X7fDcPQmZzfPdrYHNXm+lRO74533fXera9Vp/PUzlMrc4VmCSnRru9HLcmr4UqhRCDAneRO0w9+90e/+re/HM9jpJZ1GsncCZvQSdpTHZ8a3hd80vN7P6Sh916mYz1WrbBs+13XI0knCZmbeYYyF22n+a47dznL1cFKrYCZkVgYIOWchz51eYeQu5Rzrq0Z+K+/+Pzz6XwYDrfXN/1V7ygp50++84kBKvq56afz8ctf//rzd1++ff/ueD7Nh94Ac99Jlyu6VfNlMokIyOgEvgw3CMmBidw3mCQaaNXGpo/3X2Videu63dzmo9eXt6/ffO/bnw0p/EYEyN25SSUCpNylYdft98O+H1JKCIDNgEJ0TVtrDgjmpjrPcwifmBn4wm9XNzIAgMhgW6l96i0bAU7ncTyOzIwAgmJiYG5gQXXfREWZObKsruuASSQ3KiIy19JacyQz26a/W890y38WtPLqnbE0bMInjJEMjAENzZABVJB0GdY6IgpnzpRS4pTmpTNqC5naAQGZuZUa7wGhnODeSi2t2nxq0Kjjnvtd7jtI+/2eM7rYw+nR776qtU2lqRYkAsLWm6OpWZnO5/FcWk0593noMLFBzTNW11kL6N3p6TAd7++rTpOPxeZW59qgKXHiHtoMrq4GQOSWGIUxMWVKzPyNH37v01fXp4cpjwoAPha15oAj55bq+6kcm43Ko9Z3MF9f7zzxNNEZvQVcykzME5IBJZKeu11qbI2Af/LZp9/6xjffvHgJMk3jDNqGlDmlWmvAchwBSkvmDGiIH715/e7du/un+6nNZvB4Onb97uXHb1zSaPXudPry4eGrx/vZmvRDd/uic3J3Y6y+yHghUZ8yuLJDQhKgGNiJIyEm4tz3zji11ghBuICejndDv0+Hw5DSRO1Jy9QRvbyy2x13udXi6gSM7hUM3Qj95ubmxdX11X6fc0YHbEYICTEaPd60AbiathYbgIiAGQAZcRHeMmAAUFPXUmflNgXBVwER3Sw8xayp60LKCA/POApiA+ec+75HYZFcAEVkhqKqgL5tgK2wuWjt0CWUyNcxAiIKGBoCuVm8BiI5KDo5xBHDzBjIVSEiehb/dwjCO6wpUCh1oQPycwv1rKMKdLvUWS8dXQ2HV7cvUpaGdXjsm+l8LtPjY5lmRxf2fJ0piVo9zuXheCzg1WqHXeqEX1zDXhNIKzpN01Ob7ufT6xGpOii
Y+jzXqU2IGXdJa9PWVBshECYhTExCKMhmll5cD99+8/DF+zrNDCBFHx8fMYsOWsDupdZMnmmw/P48tTKhy5O2J1dHTMH7by0DuWFrVltzBQICwy/HEx8fKxM2Y9MXkhKRWkOmOFfRHKymhWZNjbtvfetb948Pv/78s/vjEbtkNv37v/087fb38/Q4nU04HXapPzT0c5kGTYBoCqXVscxmllPKKUEFQRTk5JARBYERGIAchtRVVvPC3L94/WIWeP/49DiOZfY621ELHfqX3/vmy9/+rl731CV0RTdCwOZgKuAi/PHr1zd913UZ3FutTATIjNzMYhIag+RFkXSdrjqEknQsGVTwNjUR4JTMVediCsK85FSRTdWmquQhlkjPyiOIxBxS6ZQkpQ7NV5N6W1Edz6DMKJFjFAuA4RZz2duMPxO00LomCGajQ1gyrNuImICYWQSI3IGZ0YEAORxcaujeqsRujmf5s4pTA6UOBRJWYpL91eH29lYSjm1qVq/2h7t8T446K6FKcskJAKZ5Pk7n03Sk3AEhMO2ur3Yv+wwkwNb87uG+OWiHIrmWqdbWpvl4PI31zNjnHTMzR0qAkFgSY0IScDaYWr3jufvex+ef/AzeP9wyyWgHkoKo5GfUM6uhJzBSPWptxydlvB9Pp1IC7BJSoeAOrWFrZqbmigDucH31xXh+ezwmh9f769QNZAatHVJyAzMjUzIXICYkpPdlVLdM+NFHr/cvbk5WC0JiujsfSfJwECNWxsnrVEqtVUgQwQEUFzgxArR5GnKXHTNABk7IApgAGQnNCT0RM3A3dHI15Nc3b8w+/+LLx/H0/u79kdrL19ff+P3ffvlb3xk7wthGHs81J6Q+73P36uXtHoGRap1hUnVhcDaLjlDgEMB9U0WHRfoTrBmYVW1gyMxlKth1XUqCNGst57kiWsrDsEdXMEcDNABcVtFlKr/lQiySUrLUNmSHfQhfEBHmdNEjWt00LnA9y19GChSkewBwdjC3cKeDmL0xCxOzIVgowQMArkbkYO5ODn3fU3hU4aLSv5xcmYGIjaIrmvvMWRxNRIZhuLq6OhwO9/lxZk2Y+5RHm+dSjuM0zRNTOlxfo8jV7ur6+urV1YuMaaFwdvJ0PtEuOfDT6XT/9sv56UnnycV3uw6AhiH3VZsSJumFEzG5oTYHO9X5l2M5fPvNdNMVbLfcd8CHw80J9TGjmzUCMueiMFbA/nieRq2jt2IemICEQQDFGLICogkbuAPMmco46TSnYM8QTrvdFeVxHDt3dmc3VGf0aAvurrq7x6MTv7590bX58fPPzm3ub67drOtSTv2s7Vwqud30Q3f7cjqpmWlbxk+xz0GtR07gGSgBZsBMLECMPkj2pooonZy9zVr2H736wXe+89959eLzr959/v6r9/PJdvnwrTdzj09tPAwDMAmkjIDmktKOu9vcHfZDas20lQLuKqZmrTG1praGbXC3ugxuo8MaC0ZVTRXCAxXQS6swKUmttc4FAFBRqFhtoKGIEGVDuMk/I/MvQzuvj2jE+wUFN9KWIKAtCbvaNjHYwn88fekCLea25oAUyotmYGCRzARf0dwcoQVRWwRZFuaLuSP1AAGFDWwZxTngkPtkRaECMAhISkxEpRZOHCz4lStUCEgkl9JwKnmeVbXb7V68eNHtdle7K6jaDYMAl1LAADPZ7A30VOev7h8+++wzPZ17weF6xzl1XSdD3rFqRRTuJCUgVgdURRjNjufxt16/Oe/zmVSRCDAZCSqZE2ImziQ3JIB2lvR4PtUyGwNy0CQNiY5lKsiZmAJUzOQA6vZ+qsLYXe2h+d00naep3b5ML14KATsQMDozGTq4K7jrOL++vkXhU2vJ4Hsff/Ns9bHMdHVzLOX4NCXw2yRIXKZWHu/a4aq1GirkSWjfD30ScWT3BCQAGSghsLugs+PQ9yfT1hx7meb5y/L40aurV998A116kV4fvvNmRPji9PCIrSTkYWchRYOYkak5JLn
m9HK37xJB8D/MEBMSmDcAMOLtBHhuspgl4m3OAAAGEGz2Q7ef5/l8HGN7aG0i2cXLOAVZERZWubkjgMoubzHb/eunwRrUnwGeW+SNVk0EfjPfRml60QUSEUmUAIAAwaxZC+VoR1PV1GVkiEkWARuCO4hwbCBBUtU6TkS0H3aVOaUkfedEVRuYE1FiObbSp15AsACHHRja7jCUMlVrOefD4ZBy7ns99DdDt3tHtL+62e+u+l546IZhd9gfiDhLP00TA7daj6fzw/Hp6Xx6e//+8QE/+8UvHt9+edV1H93edgC12VSKzX59c/vw9te7vt/3++nucb8j5vxf/8Vf3ry4vbm6fqtz/62Pnw4/Hx/tzeHKj+do7vaSboeDFO3O2hV8GKchd/2Lw0nLwzxKlxr4w919l3Nzb4I5Z2aOfm6IGKq5kXcMRt6KPtV5NOuYyMFqnUrNxEMSAqmlmlodJxAW4h6ZHBJ3+13/+bsvBx5e7ndFW6m1qLkz0PArkkYqIgSYGAUoGQhgB5QAspOgs1MikEi/zZkoJSil2A5J8Ld//0cubmgqcG7TGR16FpHg4CPQ4eY6NbPjybXtUjpIZnc2QyHHrLXUWl1bGE9U1+AVRUOciLIkV4NlxmzNFQBExB2bG6+IgdYamDMnAGhzqb42Fc1XdX/fOjlE1OeU+q7ruqDjIOLT09M4jsxMi8QgEdFcyjYhjqMAAMI1fhXl5dgMyz4JTZRFyQQuTCRjSupEREi0CXRpa4ub1KakZa6qMT5DgLAj3+aIu36XQKAGPFzjNFSFZgoAnHjY765f3TJ3He0SpW998t2bVy+Hm6vK0BIJJVXX2ty1GZnqOI73D4/vH+7vnx7P83Q+y+P5aW6zWtvt+oMcFPzxePr44++8e3rsUt/nYXo6vb559WI4/Jt/+a/++u3dH/6jP05Xfn56eGdFr3at+mnWDGiAgtATE6I49GqpUY4SzkzcGT2MGfK+c3MFr6SAjcgNoDE4EBpqqegmTNR1AOWk9e3p4Zs3r5QpHENqUzXMJJBTm09mRgbAjuAJiQkT4Td3L8dWxqnMDdWyQiixws9W+wlyYMfsKEBdBH6HjNAhJgABj06EVnVhAJDM1ear25v91Y57qWpOahjtb1iQfuCchRARLBOnnK+ADhSWEg7kYA6Ey9zI3NU4kYaoopqtEDIRATVcIc1OsZZgKRpXdGcsPaJlgLH0VhZ9Gg/O0wZUu0zfzex0OpVSvjblxY3CvswQnpnBG+aNcFFEjw0j1nRZ/CH076Hm4yS8bkFmFsdFIcLDPSo48+7oEMRHYcYguAC6WiBg0V2ASNFa09acFFYybtd1Ls5DekE4NR1vZmtUp3r10Yvh+oBdMi3T3Go7++PJmttcEQCbz/P8eD49nY7naS6tfvpwh9ogEbDwPvc3V3nXH3U2M5Es7jq1nvKh2999/tWP//WPzx9/43D7wjn98u2Xx6eHIctTaqxlD4pO6kiO4iROBMRm+/1gjNYldZ7YocuQxcDv7+8ZCd2sFgz3T0JHEMlWWzNTJE5ipqda9eG+7/tD7gdh6JKCF7OENeooA2RwNENkgfAwkiRKDRBNiJRB3c/TNJW5sWprpECACSkRDyQdYoeU0LNTAkgI4dhBq1CBAnCSeXr8zsdvbl/eWCdaCjmiOpkjgIArIAVJCJwBM+EupStKgxuZIzoimpAbWAv/amN0SBAzOF/loWhxKFpU/11XXD5d8GN94ZZGdweIzJ0AAcg3hpaTfZjV+GZfBP70dIrRLxGFkDhsVcfqAhYTujh/QhERLqBEsaPE1BkBYKMIIQYyGCCCOCIiL/mfx6eMfw0F88sPp4qLcdTyTkSkY7Wm7VyhgHSYRESEmbhnbc4IivTiIx12dTrXh/dPXTe44Xgqj+PpaR5LcCGqhhsKOmno4CkkFmY+diNJ2u37j65vPvnkk5evX2Fm03T/9Pjymx9P795P5/HNxy8//9Xn/+b/8y9P756+/0//2UcfffP
zt28/f/tlPY6GfodOHY5zS0baoAGCqamhg5OLSCMAYTJzRTWFBmF8gixAFOw2IDLw5pYOshgfmimCo6urtvqr919d97vr3W7gRImtKZqB6w0jI0qI1asxEQI4+nwu6i6SLYG6jtP8UMtpGhsvbOZM3JP0yB1yTxRmYQkhITBRrH5EByYnVFcFVPePvvHJ9fXVhNbIJ53p7N4akCdKGSVxBjR2EkZByA5iJugIFsUgEKISADQzAlUkUNzK8SWXjm7jkqM7Ca+ggefFTGRrn37J41XVozKKTQsU7sUBm18oxavQpxc8nU7elIgCyw4rZWVbeJH5xClBxPHEBfR1UQoLusfJtQyfAZ3EYaHtkD4/ISxKMmFo+bfW0JzcGYkAW6mOoAjOBISMKMTGUmaHZtKQkPvU77pdn7JLMKxc0SFhGrKzNEB9tGms7TQdy/R+PJ7mqYKRk6vtcqcG7KDq2pqrATi455u9z3W3233yzW999PpNStKs9rv96K1W7bqOFB++eviz/++//osf/8UnL19/+1vfA6X3X757uj+xw9HhHTYT6wSyNlB3A3TLBsae+nTWqTpA09HqeTqdx+ZM0zQJ477LvaSlywGualOp3jVyAAS3Vs2tadhL38/nc50fp/OuH3a5E2IWAgDWIugEzgZsJGZMSGYVURkn9GNrD2V8Px8fdZqooQkBZuaeuKPUk3TIHRKbZQcBYHAOWRFHBOQkFd3ASrPdvn/z5lU/ZLWaTNjAytymyYkQqXfqSBpgTrxD4bmBTuCKwh2LEDijO2IlhUUNU1Gs1mjFZEnh3xALPVo+RM7MhgiGoXQdfZ5YiEv4XyIowrL+AQDdDAjdvZQS5Swv6PqmqhGAOHRB0OOI2aqFdUfZAsdwD/w/Xjy2rGlNgRxww0kTEpCBNdNW7fLhhO6AuIr3tqXdG4RljXoiMSVxWk7AnfRg0KgJSZ9yl3JiqahOhk7IRki8y1nsNJaK+v6LL+danubx1Eq4WBOJOE6ziSND0CwVHBCBiaBnYuivd4eX1/mw03metaGkYb9/+9VXL168MtQf/+mf/uWf/RU5ffTyo9cvXo+P58cvH1HBgR/rPI3H90/nj6TrXMWMHBigEhWmhDTVVlTN/GT1VKdTK8ikte32h/1uuMq9lea1uXul1qnf1UKIoTxlquaGxCLsRse5HOfSlXnfD0PXd12XcxYEAk+AyZEJmiGjIRLuuknru3n8Ynr8cjw9trlkgIF3NSXGXlJPQZehBJQcEwAjMDoTChKGvze6ATQ3Q2itfeP733z9+iURCCCaeq1tGus4GzOnLOpJwaLDqCaOZEqGKaVO2BdHdwYBR2im7KSorsBEjGSrN+MlTwsvKIjRq7GwDwtaueNaSRLzIm6OGODXpbe/KTuQasyaYwMQMSNtPEncGIi00AnMnlMgREsp6WrTCP9NnGB3JEIMqcDwNWpN1WnjpCG1uWJKxMjheOReSglTNHVrZhi+crgcHR2lhq018KYQmHJkIgdGZgahUqtPNmt5HI/vHt+/e/cwlzJZM0YMwUk3Bxyn0iGHTx4iSqh5Ij7hfHU1XL285qEzBg94mDZRJZLT6fyTv/qbv/zzv3q8f/jWzZvvfvM7mfPd3d18mtioNGtEMKQjFlDYu/fmCUGQhKJPb0g+m6rC6HXS2sDCB4ABxTE5uoIrEHEPmMgebF7miG6GzswOUEyJqKIrQq31rMrjeRiGYRgqaUbu2DOQGLEbg4C1pvZ+Gr84PXw+n+5sHjN7J5b4xTElpj7lDqkzFAABJHdhFnACZwRGQCRkAMAy1UpGHSnoD37wg5cvX84xpA/jGTVRQCYGSgpiUNC9aZsr1DlFBwkBXAnEiSwqAXd3b6aMAQdeNPEv24vzPP9G0HVEdLZl0Xt4cwAihiYcugMQgOmm22UmtAjClVKAaRGvIGIWcthEEenCDCbEI7YEPrbilvzAxSAMACQSo4tP+SE1fov9sc8Aaq3MHIJ4iOihoavWDf0
CQlIFpWX+rQZqWq2WohW1VNewl6Nq1cgNXFXP8/nh6fz23dvPvvisjTZrUyQeusQ5SWYkcTTBDBR0sxpbz1wdjHF3c3V9e8NJgDAPGRjqNN7f33/y3e/+/Oe/+Ff/6l+N94+9dK3qxx99UqZ6fDzV82TNzaw/7G92N8cGD7/6QhUMaHA2NjUEVHXvm4+tVMfR6liLEnSEWVIdZ01VjWwqXluWxIA5BiceIH5EZgCqprWW5S4kBoBpbtN0ktN5GAYbsCPpJfWYk2NyRiyAdDy1d/Ppi+l072UeUt2lmqkRJJYucWLJSBKMujgMkRicw0l+SSscEKc6qwCSIOIn3/jocDiUegx+AIIJcU5mkpgkEYujm5WmPk7dOPUKTBLpcsppwZpGrAUPdOU6bFoWXyQgl5zDiyWIUY+uy2tb/RCKEgAAqwn2FpQXlrDZYkgrxMzEhEiwltd46X2tZV29l1igD1jfWwEAAGK1ARE7xAiaNSTLZRxraohCacbMAobewKp3FToGNm0LJYIdqLn1nMWpatVR8axElIkQMZ2u6Hz2WdPA3W3yQ9NuVAQS9hMiiMw0fXr3y7/97PPP39V7AlcBYkMsymNLg3BmEs45N28zOTIqamsVGbPI75/lzev9y+G67fKnYA0cmBiHjrpDS3/5//iX8vbpo13/6eMXv/1HP3x6YdzKw/t35Vx0aqiCmO9rOfJe33zz3NpX5gJu2qw2Qk8p/ejdBJLqPE2nuYM07Had7Jh5Nno3jo9YrobdMPRzUW+VEu+IqkM1a0SGpE6K4pxAwayWqg1VBdpNOpO9h+PdlJww/O9sEW1Gd9dSCTBTyjS8UklnyWNCxO/DjI3AKzIIc2LOgNmwR03uTASISlbRKpgi4G5XxH/y9tMf/sf/5Hv/9E/e2Zxubuv791S9O9U3jV3EiFPXdcNgiD/6bKHvtdYwpbJnka5LXalIikw8WDfZ+TRVJaUuedFSDREBCVlqa22aYnnVugRjiYGTEAFM4zEU0DgnVR3nyd2DIjydR50LMiR0jjgKiJKZOWYIiJhTotD0d3BzrpAVwEDAwNyrddf7EEOfpzqW5hpeUM/CQW6AiKFqZe4SjGYzi6RlCfitlVZbawhea8W64CDi4FjEJ9QA4sYhAy6EAwcCtFXjxN11olKK2TIKJCJHBPBxnImoVP3lL3/54x//u1//6vPaoDVDaI6hzr+gsqspEGLB5s3JJQtlYOZu6IZh2IMNu53kZKvmGTS1qn03PD0+TuexlgJoKaWr6+v91dXdOM7zHNTs6FLAivlzM0ZAiwZAVXczezefwJwAr69vD7tBcjrP0/F8BERFIEajQDy4AJFDRmYwZEagRoQGHq+Itka95bIpADl52CovLcClvkIgY0eHhCCAAtSx9CzMLLVBNGQQhBYRPl7EXQgtXArd3Bzd3Fur2HdP0/njb3yy3+9H4wLN3ed5nmoxtz71abfr9gdM+ayLStrS0lkJfYjYdd2S6KoGMSCkEnAZeC1zohiKUXR1ohseU6PwFAewldsuIhT4EXcRifVjCOyAiEDIwIwUoE0nIoPQ2AJyUw/pQRbRpKROiGZB5f8HaJDLz2t8uTyXJJYphkKgeyg7RL0Da7dINd4htBBVYdW7xcjDEYjiN9F/amZBiVBVnSQSNVXVBTjn6g0QS61ffPHV3/zNT37yk5+cT3M/XLmjeUFEpkQIQLO6KbiBpr6rVoEhAedFcHJ3OByuOx4Oe0oSpzCaaWleihv96lefn5+OZtaqHa6vrl7eYpLz+8fT8Rgu2agLn2NLCrf/jS/uru+59Ule9YdPrl7cdF0bZxzLrDCRGbuKNwZD7xEHoOw4MTdjAZgRCkBBNHCIsg7BQ5csUmAnBF2gKAbL1CgKR0QQYUNxzMS98MAy5JxEOi0hPowEDJSQE0A0jmi9ZWgI6CFBXwWLtf2Lm9//4z/aXV/1M06nh6CrRMYyDMPucODdrhL7HFomvnVpghHeWkspRThHxJT
SMAwhfEJLW3NB3WzZcmstZp7b73EV5GJmzinnDIQi0qI7uWLvn5v66ESkjsBEhBC+AUyOiOFZ6MAJRNVB3Z9tPC9voq1K+M9ZP8BmYY0BhkOACmBNY8gMAIIiEm33RU3AACOqU1Ori/QFpeQESCjOaGuyhdZMtVqZa2sNW4g/Ni82TdM0V0xzBUu5/+Xf/+rP//InP/3pz+Z5zjkL8zQW4Pio1RWhIRoYgnqTvoOLB/IycBmurnkYFKlqsaZgzs28tPdfvf3pX/71dDz1nDCn159883B1c5rLPM7TeTyfz1idFLQhlAraSplY3SEKs+c09emm2+2uX1y9eiUHehjr05jm+nI4vJ0flcgTKbq7JqQdYO94BNSYxSIKOpo39+ZqAbulsOFCRAIPZYa4xICA5IgE7IDuCUkAM+PAMqTUi/TCQtSxRICE6LMBZkBxSIiRdyOYgTtQBMPW8dvHuz/5Z//BH/y3/pEKQ0VHKKU0N2TKnEJ/3NUMwM0CL7A9bNW2mef5ssmYUlrYhmqAGB0bX1SrwMysNlcHs6W7Yh7hmYVTSiEYSsLbVgzx6joXb9pqbeaxATxwaADuCsxx352dURYqIqGuyr7wob6JX9xKDfn3aLdengCMAg7a3EDDRSGlAGswsSk4Oqm6m89TnefZbGGv5ZwFybARMxBBHG0OtanW1kr10kB1LrO7l1bc4Hg8j+OESarp3cPxz//qr//iL/5mPJeUOgSJrm3fJfclc3N3jFE5iYMiORBuOnsAoN6wS4qgrZZaVDUhobpXe//2q09/9gubCjH1w/Dd73/v6uVtAyNAb9VKtWKkYOrYLOp1MydwQ1oL/wZGT3u83onuOldCd2rWOzFwJrZMJkE5VPLUIe4AdoYNQsQKGVBBZ/BqRkyOho5hsu3uBGDhfhnf1AEAWEEQCSy7d8R7Sbvc9ZwyU3Kn1noSoGjyIwIKkgAIQgIMkl5DDJSCgpv7o5W7Nv5H/+K//9G3v/l3d5+HZZ2vU1shQcRSCgB6lzPyHOe26nZEhNzs+XzeJM43cwoRwfbsdLKe8+7uWqNIdFzQmkvBSn2SLnddl/uORZLZPM/zPIvI4q4C6GYqjOZC7CzIhO5AiEwp5aXZ6uRmpGpozY0cgBH9WSUFLiKZ4Tq+vVA8if+VrS2KF3jrRUgDcDF7aapudZzO5xOYLowyp8TGYE2BGQjYm9X4PuepziVKhfk8IlPVqmrnaTyezypUtf3i159+/tnbcZ44ZTcuUw3CGzO4EwIgh5ZgQiYhbFodjUVQWERSkpBlb0TqXkorNXInYkdVnx+O5/cPqNZUh2H47vd/+OLNR5O1hEfwsAxSMPDmkacSES4DRVWtRZtrI6I79YOX9+V8UHuJsNvv4WTn+bzrMwk1NNdq6maIlICxM0wICVCIELwizACVIq8N5ZHwVnRQVFfCFOAaMCXEhJgQxCUhDJwOKe8l9ZIEHMzRvA+0eewAQHYUAHEgB3QKaykFbw4KUMG/qudv/vYPfvcf/1Eln7SGrpGIBFXFEZqqtgaAnaRMPH8onqOr2KCZ5ZUNu6TNy4n/gQ6PqbqCmVnTwDQgQvSp4o8gcMgpTGOygjc3aHVuVUMAa+F3IjOxSFtVdW3lphPJcsI4mxknWcjHtjmMLhik5QPSCvX5kCas4AAgtD1WFDXLsiuYCIBUdZGJatqmMmmJMiinZKoGCESgRurNvdZ6Pp+n0zl0i0SktSaUENEMytxOp3E2O03jr3/12TiXnDqiPE8NEZkFgQHUyYWS5JRyT0mA0NBAXdFYRIRSJ13XDUO32+2U2cyrtqqeCYiQHVD1dHdXzmOfO69tPxy+/e1v+343np+0VJurtsJGZGiqXuoCYQ9t3IU5GmRKUjEc+pb57mFsczmgQyJT6CWL4GSt1lmrKvHMiRlzAwBu0R4PRU2EhjgTOIIDMhABht1yCGe4hzRMSogdSyYUhA6gJx447Yg7BFlccVVCsAYX0sjChQcPnoa5N4C
CPrvP4NX9JPY//5/9T9LN4f3pSbosVo/nJxHp+36aakxnHQDNtLWmGto2sUnC2XYFri1ORFv4jBZFej4AntNsXKxc1uW45STuzdQCYsnMOTFAqObE0RSVUNBtg4zrCJyl73sOMVlhVW1zm0wZKUHXteburVRrTV0TJaCL6RvTlu7b4orybL0KABLivXHM0erEFqkSM8d9YgBoIEjkoEVba2gIumwiMw/5MDMrc5lO03SeVTWlxChElFgQqKk38/M46zzfH5/uHh7n2hRwnqs1TSkTyXyeSZCQWDCllDtBjkYBDrtd80YJAoWbuhxqSgXQ0duiBsPioaLmx7uHdj5L7ovTfhhubl48Qi1zLeO5TbOXhiBgaLVarW2erDWoGoxyg0WhIGUeunzz4sXQXz+dPn833l1Xus2d7HYinB1E7dQc1Sb2c4JGcDACggTESABWAUfEGdxAfUXJsAM7oiEYGjghoBMjdiy98MDcIWYAARgYO4BsLnGkM5MCAJgDwWJaGdnuMpxCb+AVvIBNrgXM9vm/+5//ixHaaRqH691kLQi7u92unScvZuGS7qBTmcrcZ4nVsxHMA04TBXE0gmBFKddaJWpvd/BwkA+jxFDYxmhtRZPKzclh2U/uhkBEyMRJSFgQjAgrRs2hq1SjE3KSbuhz34uk6F+11gxBmBKgdt3Sb1TVD2cITqvPMDxn/QrPq9/dY3q4/LdkEI7NfLfbxR+l2M1tZKdeuslGNNw2gLoRAJBXtIeHh/F0BgAzr7W5OiMnZjNr1swcEYva4+Px07dvz9OsBmpARKnL0cLb7XYTjBEeYp07orq6AaClJJJ5OZ0YU0qcWQ1qK7W1lPO+6xJgysa7vZX6yZuPvvrq/c1HH7376u70eJzEb29fHvnLeTxnFnIq40iOrpZTGscZPAwzDADUDdyg+UDdfhgwp/PAxw4f6/lpml53w0fDvp3GVOBadhNMd/N0Zrx+cd1NSkgiLMhAPjjsrVUwYspMiuQK0FQcBEVSOoODKSMNIh2LuGeznmWXUwZISB1iUHvR1d2ZkoeN0xpWY45kBgoufff24Z0P+X4acd9//u79/+r/+n97bDN1/HA+DiFcZu5m2towDNXmMhczZ1LO3ZAyYLTthZmjBbSGcw91s5yzqk7TFLPerfOz5Btr25CIfLVPV1XE5c9KiMyCM7MjaGuG0O93Vltob5qq5LAPa7VWzp0BIPMwDJJTrXWuBQhzlmgfp05wglKKug3DED39diFiiYhIzMzzPBtC1w1EtDhJ5ixZBJ6ToOjPwQJ2uvhWYEuPP7EYLjJBtHHk3WutrdSlxlKLxpGqSkqhSuhgpq7NHIVSNmdHQuQlS7TIJQ3ISSilhAxV1WAJFUQERE5LAwQRnRwAtFpr1rQReEHMkkJH6OOPP/7sNL168fJ4Og9Nf/bTv3v5g+9KosQcSBg0QzUyAtMWoCZX9AXotHxps14xI2OXyiBPO2rFanXyuq+VioI6EiulKfmYYBR/k4WRwnQVm7Fq53RFkphmIUVwMFRihI5QgIGaATFST9QRJfdM1CGKGSOxG1FouGvcBcdF8sbXLkcYhVCW8Xg+Tudv/PAHf/H3P/Wh56v9daYf/eM/kl0/tgmZ61yOp6OVGuQNIEIiYKI48ZAESfrlztqqmxCPCP+xbmzVF4lM2gK7H/jm1eTLEYFpaSQyAVIgS5zCI24xEGgenXGda4nWuasCEfqS5F9foAAAWKJJREFUo1Oirl96psuxnGSaYqDkjAxqAU0ANQetoe69MgEAwMDD4xSF04oSXYZxUV4Q0WadsNXOcdhFu9PMYhjESNQNZpZSEsmIjLCYytgyBhBcUB/BXc5EiERoTORO7EhATJzUAKKHCmRO4BowTEy0udxULWoATExkwLxUKZxz5rzcJ9fmCmjYmjZuRpxzt0/8w9/+4fj+4eYKfvLzn3uzv/nzv/6TNx+lfuCQqjGz6tY0PC/aXMAtBh2ES/oYjSABFGLMUpO
MPdWeEb1z789jNxdq5kxPUN77PM2NR/pBvsrECMytWTNW3yGxEJMJQgVUJCRLaB1SBjAhMBDiLNITkUFC6JjYgd2QENwNl0Tb3JjMgxT/YU3Xp77wuSH+4otPa5IT2vuv3v7v/8//x0++/53TOI7TyEnmaTw+PEJVIhKktt10XABtAtR1XQwBYmFEzy2ieCTrthLeI03iNaPAVcs/Sk5cYTXbFoJn5wsMCEw1VbP4r7bW3BZy6SJqRJAkYv+w64ifUUbuC+uFDdAkWqje9HLdx+cxCGoNFG1d14lIbdbWWZ6IyIIlYQIgDPEoACesujhIAzoYoKGgdJRbdnQI/6YIAL7Yry5QO0REQXfnOHgQolHGzF0eOHVQtKnXoBcgE9Aim+GAgKEir66uoKbqBkBGLIaMKELDMOz2u2EYUkpATkYJBNgWB3dwI2ROH3/zG39/+3c266sXL7XpL3/28+/+7u8hidUGrXpprajVBuSupqXy4qKjZhI4RUdzhxhYOmJFL0Q+JEuolh7O2qEjmJo9gd0DTOZe6kOHOyEEHhwQLaEiWgQRREBQdUe17NiBZ4cmBArC1DMJMUIL9Qoyxxjdx4AVLYa7AA4OSqD+fAIA0KnNOGSG/Pbpjt/c/OLvf/on//F/9M//s3/x6Tze371H9EQ4Hk/1PJF5madVdiqqkmBkAV4Ii29LPE77mAPEXb7omyDrIiIYiw4cgHCbSQGAmzrCJsZLAyNTrBxcsQXLyonEvSmYkwOwg3kFEJFQHSVaRtTzPHepEyRiBDER6SQpV19Xf3TQkYkRzMwAwZ41dxExoLgAIK1UEhYARQRCwkVfvLUWu9cRA/KODkKkxizMKTlS+Jo1NS2hZGgOy8kaUwc1EPLaWjOllFOXkVPRdj6PpsEYwzjV3RCX0pPcPaB/RIyICm61AmNySSkN+93hcOh3mZO4O2lsU1Fq4dBWtRbg3fXV9e11OZU//MM//Nu//+XD49PnP/+lpKxziZxJW2m1SuqDB7oFLQPFVWSGiPrrXdplR1tGpJKBqRiTp8oKVRV8xkxMuWPq87u5VmQmzElyVWwkDuYQAuuEZOjInhyzUQeoCMDA6Jk8oQECuaGHxMyipeMIZmAOCpDRNbqmuLDr3B3Ap2mSfjhbefHtT37y5edXH3/0v/sv/0/v6/n+OJ/L3AuXcX735VdeWxaejmdabbOWybS5g5pDeG/56iu6dcm3Dg9cSmUi2lwjftvqT6pu6ss4N9ZhRPZ4kZQ6Zt5EBJfMCYiTRDPGatPaIJTXBKvWhTGJiACqdZ7n6TymKwHi2P+MICJJpK6m12FOE1/Q1qbW0oMyC3UtZp5rkVIKhRMlBQoaArYaYnchrOdNtTZf22HPfDNQMK+1Rmq4nZgUOdw6R/Sm6sqGqtqm6Xg8PR7PzXTzBYzPSKv6l/pizxcNw02x2gkX88AuGrWmCtTQEDwElIgcoaEX1xc3V68+/oga/s6P/mD+f/6/3v/NXz/ePYwPT9IaATJR3DdDBTRCJAdfKoAF+02EKaX+1Q0fhjYXrw2rChI4jWYlMxJhZgBAFs7S5cQi9/Ur85aNOueEnIjJUN0GRCFSZhUnTqlCVkjOBYsDMmJGpG0g48voxnH17ACvsVO1GUJIsITjrwMBGEh6mM81069+9cs7m/8P/+X/Ra72IzoQck5a6+P9/dPdfUamIbdSolhcddcAHMysmU/Hcpm0RJLsFxgHXB1X45/G2prb3GosDCIyhBjkQMgoIJJIxDIASH2f+o6ZYfUm2zBmkYqbg6uZWpBxyatq1VJRFnr7PE5RKhg2wlWiIVFK7E0bomncweUIiviWUgogDpGklJAXyS0xM1cgImjNEcgMjBCx1mqAFmO8plaqqlrT4g2Z+cKAqdRS5jmlhMyJlwaCmRn4avAOUUUdj+djq3f39+fzmSRZ2P8agFlwEZjZPVIpilIJAUQEGSnJqoIU8xaNnU3eazhaJCfilHPuuoSZIV3dXH988+Z
P/uRP/uanP/vTf/cX09NpfDzfpCwiOUsbeTLTUHUlgpV/7bCwMEU66bIPWRmrqZWaFbIkADurLkEbgFE6lp47QIKGZ0Zy61sdmu0cmEgcCCFH8c4cw8qEmBXE8ETVbAHxM4Kioy1uFVsioRAtN2iBVAV3gIauyxAKHSHv0vE8GvcF7D/5F//D//A/+e/9+O/+5nu/9yNtc621PD2dTid0cDedCgFaXPMl93BwAHU3G9tEa2d801aIkmAbim0ZNl4o7cQGYGYiYeZaayRYzJxzjgkuANguxUANAGJFXmD6GYlQGBuhsyAlkZ6jFTuJZQWfpqnUiYhqrQQI6K01MGOkzKJckVJE58DehbZonF7x+VNKJBxMS2YWD+vqwPFTQDaWqn9hpplbbd6aNY2GZkqpBVZglRqN7xDpO9lShofnVxgxMvGsOh6P99N4PJ5LKUlEl+GgBzUoWhPFq7AwpRYnqWEi6vrOaelMB/uMaEHvZaRmwWXGmO/0u2FHaXr/ZGY3t7cvXrw4HA6P9w/d1eHh7v7FJx9nSU3SHPXblvzEMBigLcoxhogiPIPO2lqt1jQhdSzVrUGBRCYMJIDSgNSImlttTjgrTKpz1WYMYa4BNquzmKMjMgvEKIABCJZ+iVCk4GHDYTF92RpSBq7gDlC1RRO9bZ0zRweo53NTff/+/f/0f/lf/I//F//Fzz799W/97u/89O9/wTfd6elpfnyEprvdzkrVVgWpmS2pfzBRKHo33rTR6lO9poXPyEpfH1vhi0wQrTlAEk5dJhQz0+MxdKIC+dN1fZwYUzYRwZW/0lqL0yOzLAMsAGMmwEScUorsoNYKQAoeUKXErKpKighBut8qk9R16wYIqlnkWniJOIr4ztFaLaUSkZiLOikQszAjgjgRUatL5yulTOKllCs3aM0W0o2ZGZhlwtZq04baZBhQpLXm1oTxnTRmRuBa9fh0fnx49LHcWA9nN6Oqq9WCEHWMidKwc/cCDkjEQolcYMKWu9TEoc/Di9s89Ocyt6oiojYZKJKJ00HkWroBxItrI5bDv/2zv9nLa3tCPop8xQXPsH/3Bz/8xtt3738y3WMuDQ0B2jT13HWc61xasSxDkU5Th/ub4UhK83ycvbk6ltKg6o1JruIFjEAyc5c9SWWdQlWsz3wl9++P95y+3R/g/eO1D9eyH9zwrIbeyCr5RM3I34yDAym4VqqIDaEJFMDHMl6/uT1rLaD5qv/83Rfv7+5evnkJj0YpoAHewBtA9TZa+8Vnv/in/+w/+M//R//p7/3jP/gU7+EFv73/9f6K2/G0MwXC0WxuFcEld0kEptndWY0YDM1Aq7cG7dbR3bxVobBBFEOoTn3uHh+PZtZRb7OrGoqVVmauijBcHbqhD4h/bQ2AyuwpUeq6nHNiIQJkJ/RQFmcCIvBmoC0Tpq4nor7v+75nkbYq7DbVRlnNjV3JTXWqZW61gSdiA6+qio6JnbAqVkEk5q7vWCSAx1vpQqAejG/tcgZgVa1llDh9YnKhF/RIXbmY0Q/1VZHC27h05VfZCV9FSaMMCqTUNhxhZjdsWsdxHsdxcfemZ7XeJd+UxSZthSY7hC2pSO47yin3CRLH9vWLR9d1Btq8xTR6rrUnEkVVJeHjeP7TP/3Tv/zrnwCAoQH5eZ4+/uSTq9sXpek4zo8PRwHuhl7HVm2uqobmwAre53R1dXV1e82EYd2zxEL3yEfXBPoZfBsnrJkqcOrEWSY0JqC+fzqN4tHu1dJaAVMyRxjAEU2RzKGgVYBiUNGV8dP3709t/Pjb3/jdP/7jH/rv/+mP//W///f//ra7qmOdayuRsgrT0KW++1//b/833/z+d7/zW9/DJOc6z9oaqIKntauzhsklpYnjFJDQg7e+eMiVMgOsCTwsvX3E5xepquhu1UJqqvUWh3/OOfVdsHgNoWjrAu+TkiDxqnIebLV4r/AaZeZgF+ScU860Hg6+0M2WdRV6GSmlLCk+nLubmtZqFq1MTCl
tT4SVz7CdVyGWGAlYLGxVXRhhoQVtDmrutHSBlnW8GLuahtuZt9BCg1WDJb5YZIGbnfdWMBGRmpfSTqfxfD6XUsEJaTnWl+Yx+TrpIrO6TECIolmbd0PKedj3LjTsdjEiWNt/gYtGAWzeWhypTGSkrQV68c/+/N/+4uef5qFTNyc0hcPh+vbFy9L0dBo///Xn03m2uRl5UVVvAT0y19Sn1x+9Orx69fjwcJrGok1WyRgiChkZ3PQoHQCdEVInNlcX6q72Oe/LVI/lNLe5c8xEBK7eqmuFhk5I9IAhTAZGoACT++xewWfQcyvF7VHrRP79H/3O7uOXb37w/b/+8Y8/efmNly9fl9be3b038B/+zo9+94/+4Hu/9UMaxBM/jaeG6kzOZGDYlhmlqjIE1WDB5cdCpzXFjws7B/XQn91GI0Ov2nSNlbVWUI95Koe+YJfz0A+7HSKG9XpkpInCnsxDygWWEWtISkFUGiH5bxZAL4n9GU0bZkawWuviJgYxCOvAMJZZfJigJQR+oOoz4cbXrm7co6jRQ1tumb7NswRqbfu2ulKEt30TF8tWXnzX5b7vu65bNyjCqrwVh0DEeF+JRegC3rZDLdzD/MLzdfXZcHWr2ioqIhIH5DPFVDwuE2aJNe2bqxfAVCdJKdhmjlC11cLBmTtcX338ySd/8xd/W9p8uN5PZXa03PcOSCSvX3/0e7/7B69uX336q88++/WnrDLPpUAjZicnx91hePnmJQ/D/d3dOI6qmiVDdQtI29q1WL6Ft+hKVm1m6oLd7uqjj781NLs/Pv392y974EyYiB0sgjeKMctkMzABqCM2h+I+mRXTu9MTJG5kX376y4f/2v7DQ/dbv/Pb/+QbH//z/8E/n8rMzLvdLqXUDCSn/mr//u6ujaiTVjAQlr4jYAVv4zyP0zRNqhrLkQDD7hLUYhRA8OwShEzu1kL4yhq16A7BXGvRhotN3uzqcdr3wJwkD33X92FdwW4AkLqMiIwEIU9S60LhdW+mUNTdg7u4ZCi8kN+36mKJ4oZgLXavEeGqt9VKCSZxLLkYaMcr4dJOXASwYmXmnH2d7i0xK9pcl3mSXzAetq1z+UIAkFOfpBPOZkaoIaoHTkk6U1DTVi0Q4YhoDC3BBidcAo8v8hVRpAOouYM3qO7uDS36D6nLqe+2kDDV0sk65d5KMbWmCsKMBCs861S1Vr9N+6Hf9Z/k6xe38ukXpXr1GTNhys2oql/trr7/3fz65cuh6wn8008/na21aoRIKSXh3dVwuN6dvJU217rQQYA8LESBNu2noJ64u6JbKTM0K6Z8GD7+re+93l/fTac7nR7ePwgQaQtlGwNnY2p1UEcnRDZicyyus2oxpeuBkjj5WOavSvnV433/dE9EOuTCygzUIXQ8lzJP5zbeEQuzKEI1s6qzqxOaWTrXxbHCwRIKcdS+0QXyC90EAzdwSIwKZlbBWBXDg8ihgQXvxFWLNlPvKCGTE1ISSYmZnVCSyEUNDe6hNlvrqqOPHkQBFkEiSYlFaPXAg4vH0opVBTUM5y41dyTAxFzWnj0iEgqsGKScdtsMe3upKH8v6/jlgEpJtlx82wBbG1hXFeltaLJtjHC63MTggyGw7RlfQUjM3FJ298CIL2mZoakScVgnhJiSOrg3N0cUIuIknNPWiQsSEyepteIMBuFIAgCAwkCrMpi7q7XWprG+/viGmLGDbt81aGOr0ifqpCk6EjMTQErp5ur6u9/9dtelqiU9PpR3palLx/1u6HYZyWsp7hpLP6Y2RARrQhvq2QiGFoaOtsv9rKO7ozDuOt3lecjzvqu2L2pemzUwowA3gMMJGjojBYsXi7uiVcJXt7f91QBMXEbq0udPD/mrd28+etXK05s3b169esWAT4+Phdru1dX19fWXX36JJGqqVas2rwjE7m7zHBJmvt6X6LQQEbg5IQRjfGukMMd+RkRgkpRI2JAgMZCYmTf3eQKHhTklLCmJiIKTmyB2XYcSJ/I
iW2IIqlpqNdVRa8BRI7Rt6cZz2rwuWW1NVetcg23ramqotUGIr8CKOQUWBgBYkNts2wte7qUtjm9Va0xCxM0Ce20AuHKWo55okagwh9JT9OmjKgnCeEyvQiej1Rr/bXsOANxMdeldthqMsw36EMMPJyAFD/sAQCRhZIoOGjFHdEGm5ktNUkqJEcayQ5yAsLXS3ISQENXAVui3unFKzZUEb1+/xORGZABVrU1jnUtOfH11xYIV2tuvvvRf8MPDQ+r63W7HjKfTU6he5z6NJ6imggGZIXcAZAywV9w5cwDYCVfTLvHV9b65/eyzX/30i18/eIN91lI1RCuIxNnMvKmjrOc2GZABmjshfTmdb3fJDOZWwcrj3/3spLq7vf2j//bvMbM1LWXOh37A3Xg6/d3Pf7rb7aBZqxWaMiCLAJCaurkgWURDc0OLdyMitwvD4JjHuDuhBShBuBv63f6Qh544mdnT06mUMp3nos2b9ftdlzL2tN9fSdebY22WDJw4SY4clR0Bmzk2B1WvkSCGCQAykSCyGdS5XaYrvrpTt9bmaQIApuQMYVtqZoJCRNZaCLfxwt+CrSMfL0Krm6pvg7b1Xbb2rvjaw7ncNFF/0AqKijBMywD8eVetkyl39+PxGOXIdkrERgqC6nPRzQmAHKGtochpNUJjRCITjvjhAIEDjckFA0fxba7mS0M356xFVXVuVVUhpy4lYUSROL7UXTJzIurTi49umynlVN0ej8d6OoI22e84JxH+0e/+aLg9PNaxghJx1yV3Px9P+OIqJUlZnJYCiZiQ2Vf+UUhCIRigkbueJh3n4aPuzavXOee3d+++fHzATo7Hcy2zVSXEHQ/I4mYGKLu01vOEyAxECI7srTbAcZpQ6LDfHc9Pb7/86u1XX757+OT29rbrc2uttYIszCiEZGqqqJaAcu44p9ZsLM3WhMSXPNQQwNeqbznYL1ldobWPSMK564bDfn91kNynlPqHp/P5fHw4TmXWov1uN3Qd99zvdznnUmtrrWjLqmomKbmZrmZZ7t7czCwwCFGMRod0S34iRVp6TbXO89zmMs+ViFJyZy6lzOMEjiYmRGqoqqYqzMIZnBrbPC/CFnjxiCYNPesx4panyDbv2FAMcS0iydn+ekt4skjQqHVBfJCbtaamQCjgVkrd8h83Q10oF8yplFZKWSAciI5g4ABOCJw4il3fD+4edKFhGLo+O8JYZmQQVVR1NBbOOZPDNE1aNKylzMwaGiciSpL6PDDzudabm5vv/uD7lOTb3//B3eNDRb97erRhx63NT086j7cvX4iQMQxXu+9879sg/NXbt+M4Dmmw2lwrCOz2e0n3bao59Tnlea6ddLHwCV3VWqtE1HddP4/d9YtkAE27nLMkInp4PCKis2BGQgFOBgRIbjBZNTNC4ZwQw2wCiSzM6zknADifJ5Hs7n/243/7h3/y/W9+8i10Pda677oyz3fvvyKHNkFgUxDZHb0ZAg2UJmzbClsOcHdVFZHNBzGqeW2tWeiWcc4pd33qO05iAGZGshgT5b6/ublxgywpNHVoBZmp2zbWjRi6Leics5kV824Y9vs9EQmxqxmZthYCE3VaGuUhMx4c8VJaeLMiYqtqwX2p9dxcmIXZFWpRbZO701o6b+jMaJ8EzS2YPb6SeyIbfHYK2H7wtXkUL7GkcVsNYBagoBbo7bXzw4geEsfrIQILbnYZmIXxwfIusAbOgIkzABMmRMG8G7ZiqLRqswGAgicSdWd3wOXq4DptIIB2URYDCQBF3bPf7z/6+PX3x++B8OtPPsJM98fzpIXPynMpp6PA4FYQE5KmzNwLCYpI7niXu+Q4uxJBypxzbmXt3yUhkbUN6gBAAIxOYAOn3A83VzcDpfl4Pj8+eW3CXEpZoAsrAGrhgCM4siHwgvJf7KoCAMXA7uru1qwhudnp/lTG+Wp/SCjHxxO02pGgg9eG6hiYQjC3BovO1ge5ta9VYIxUmxuvOi2X6bLkFMqNfd+
nriNJ0Uci5q7rSt+jLerciESB8Qdyd1DQ5ta8gTpDbEgiQdAQQZrHknPmLIwChvNYxtPp6elJiKdpOp/PWhbxldABRSZndVYkDrbq6qDhhui2pvseNJZlfV+WvzHI2ghukYxssj1CgBjiSpGDL8pWrrW5mrnXC5MCMN+a/JGibRkbr0bEcME3M7OwiY5cLTK/uMoWMEDyEJJe2AkpBUg1RgTN1KstzkvPsjOGhmVtzsbfLwvRoqwAAnQ1Ye72O/zkExCuYGk/HKfH+zK3Mo+l4Ons86gdgzZwJJLUSVolMfZ9f90desqTuwj1+93h+tCagYIhiqTU5UX4CRzdYjMTepfy9f7q9e2LjuXx/d39l+90Khlhmiu4kxMTINu2FWDt+j3fwDjBAcJsCgEVAMxaKVr9x//Vn+3S/h/90R/e7m/fnet0PrWplnkcckfu4ESAYdvYQjUkLXxFX5Nb32aaEeloKQQUXMG1Lc5zW0Zw2RfZkocArotI0+rutFKmNE7DWlVVYjrUFtxvBEFGYiShNXtWrXPRqTT3Ms3lPEYZSUQUxjMOSqqkTm6q1gLlpoiBuVvjSMjp2GKQQRc4pYjgy7oyi/Z/fP3lBLhMmC6v0Rb4L7uqZTZtFsVohOGlsNAAqblbTFcwWDbFoLUYDmwZc+hEOmBwkpCYSVASUSIDZ+ZOZOmTkgdZWWQppi1YnbSi0gEdQUJlO7Dz4fShKsTCtN/vXvqL4zSeWxnrpNQczUCFjAmSEBM6OpKLMBGYKqj3fXeVdzvp72kSkqurfXv5UquPp+IOKJykU2ju1WPXQfBoXDhLTl3XWdO793eP7+90nlEEW42vTQspChFMUc3c3MDE1QDRrAGQ2aZjvEgwubtZU7M//7O/TtBfd/tvf+sTq3p6OrtWVIfkcc0RyMB0aQu6C182+mAdDOvqEwrP+879og/uG7lRVZD7vi9zm6fpdDqdnp6EU5dySqm2Yk1BgKPeUwuYjQIUd62tzkVrrbWGlGAIS4BZCwsZNUHKkh4fHmqtqMaABEiLlChMatbUWIEWhf5IQBAZeRHxBydwvUxb1qu3NGPMbBzHiON1zVwC0CrwG4/tpNhyoe1KmVlrSzERgofbZCESrC1T2uLHHFDBGs0HMbxMuSAYQMiEwvEcdWMSyUlEDADQoh0kQjHoVV+INAFJV2uIuMgiuBm0hgDNbae4GMRTSqkdH47Hx3E8PU1nLkpOmSBn6TPnhMa4OteEVa2B2ED5Jh/e5iqccu7duDV3e2jFiIhWrzQIBQNyAkcHxZjo6Wk8v7/76nw8eW1WKhsgOhMwKC5KoAa8QgzI0Oqa0/l2ZAMgMQBRrC1E8AY/+9nf//j2L7UaeTmdpqGT25cv2jwREZG4Iyg0tAZuCGxOq9RaYEu3kuDydsNaB+6HIeh8hGi11bkQUaSUZZ6naarzXEpxWpOoUkCNTJBJiEytlVpxWSFlmi2Ehmqz2sCszgXMkZZGjTWdp2meptPjE5gzRkJAy7jagWNOF6vRFmVqWLss7q4tgKwOAFsL6HIOEN8uBr5xf7eIENfrg6TlMkJsHZ4tTqiq1RpDZjeLFurC1ls3TBQissq5XPY8I1HxVR3Q6YNSHZlwFZ0mImQWRAdd0tb4tusQDNQgATPXuXAKm2WrqlMzck7NyzCDGhKaGZgGoVu1Pp6fklqmpOamDUwFAYRH16B1JkriCNXF+bo7XF01U0BHRG6zllHPx4ljJwPEbAjX2gvQGvjUatWm8/T09FSmmQzmWoUIEEJ9wKziKo8YPVtyBZD1FriFuxE4oUCkVuSI7M0S707H+ve/+PWbV68/+fiWOJWm5ggsyOhIANAAzFAZnXybBsX5677US1uibGZxmLJIIhqGDgDiAGnjOLc2zhOnrtZ6PJ5ba6206BrN42hmrsbM3dBLzhCWUbCorYzjOJ/HyNo9TNQRzZwGlESIqKVN03Q+Hk9PRy8aq1ZiXO0LlzFcUlXVdNmu5OBOZmZokfN
sx9eWklxWs75Opba1vSWBZiag8f0BbeloByTIalvCQPhDmbu71bYFjC1TugwhcDF0WJcygkf7iQnB3HxtAcV0AZZOqG/3o7SGpeRAUiy8TIiSxeGZo7QFsBiPG5E3dbOqBtWOj0+tVBKvbZ7n+en48Ph0X2tFQQRSa+e5tLFMXWr1VoYcvInEsh92cz6zIQMf+v3trZ3PUy2WQfrdbtiPqsBGtrZQli+OCKhoVMAygTM11XEca5uRHMEQAB0DYeCESxSIK8QQngyAjhzI7tV3FRQh2IFgYAY+T60f8t3d06effvbqxVXO/el8/3B82u8HJCQCdVBUI3PBOA3g4tD9WryLyEQAAbtKKXWZ3b2qam2llDpNQEgoUy3H49mbuqPWliRba/M8EywK5jsiZG7aoIXWVhuPp2mawjN3MRJeG7LataBS1XkeT+cyTtEuY6LQUIEQyAqgurmZA7TnFDq6TA7tw1y9tZa6vHXnt1aSmQVObvu/EeJrreK/8YjX0pX9efl7VaWLEttXnXVbaWJwUU6sOWXafg/wzOOOO4IrDd/d3dWdSmu07umc8xIPiIoWM0MmWQ1fYR1dR7qF5o2Bm4MbmIeLIAFoqeM4Pj09PT09nedpd7PnudBYW2vtfDz3uZRJbHBesJy73e7UdTA6OWRJh8OhNatlQsQF85gqOtVzuTzZcLGqNQNyZE7ZWm1BE1nVm5cMxN0WEYVlluy2xiCksBqNf3J3RzfYTBzdzBg5SXd/9/iTv/3pq9e3H7+5ZRJiDiauEZi6gjdwR3Sixa3x8has92651ytXPUBpQ2JHEFXEoqpza600h1prPT8dp2mKZGzXDa3r5nkWoMBrddoTojU1cHavpczz3EolIl8zbzBXt1LKYu6iGq0eX8kYaN5scVIMMg5w1D/6vI0XEdTYxs9D39gFW726rdi2uoNdjsm20+AZILEhdnQB6MvWbYS1U8HMPXexjdxxoT6oVlW2NfZLaGAwMjtRs5oyJ03eagNHQgRXbyS8OBgYZUvJUqqJlHa9sRA0BZ+APcmBhKZWFVXZiaB4Y+aA0ToFkaPVUlqpyYCYOyQGuNntH+/f49X1NE+Pj4/mSJwk+yfwFQqf6vmrh3ubQSqlEQ7nZuwujlAMTppPLXVPN/Pnu8c3X9iV0aN372s9AeB+p+bjqXgSdiZgm4tWY0+MYIwVxk9+6wcp8S8//7yZp344nU4VCBzICaPusW39E4AsKx2QhTrhZmra1DTSEsYgmTtbAhLa81M5gpWnEX/xy78/7PLrF7do1WcKVSoGSAbg2FTdtYjM0yQifcqmJjmR2Xg6tdokZyYorTZzF8GcIecpOwPWuZ3m0/l8yiCH3CPQsYBCr7OOc1Pjh5NRUpZh1z8weKmn+Qz7/f56GGqt83nk1noDcbSqranOs5sxc1KAVsxXW0kzN0P38+kURR0AEBMzW7jcjc0vCZOEIeOaUjJXMEvsjRzAkZwl1DSAzMHRmkM1L+ohsgBAJrAa+LITucvzUfjh4XiZS12GjQ3/Y/5c7yJ/XUZm+3th1iiITdGNwh89ctAtBVqjESLWWh0gJO2WHejWtJHgllfRxQP9eVrX3MSgKfjcYAeh0zjXguZZUp87BU/SIRFxBYBa6ziOp9OISXa3Awt3Xbff7/f7PVQA8NYKpK6V1trzeWhNMb68q7ZmtVKrREAh27jexWi3rebMz5dl+6bxrfHi2i7nia+p+cWVh8Uizho2ZjbHaZpOp1O0tOfx6fb2JkDmGvn22uRh5roWf64Lbh5Xvap45RirB+y26OQOm9BDc4NawZZZ/tJBV1drUBtL0voUjfbEklJiTr7wJ59pxJfLKWL5ttKer+r6x1/7p8i6Lx4fMNQovOBDueeCy29mCLTF9PCgj6dv77J8dzMgWpz9EGHBF8EC/1kvFoYgk3vQavwy/2Fm4g+GzJe3GXGRW2Tjpqv302UGD6Ahcxpwc1QkEnluvAK4mdHqrxHvsqQ9uChwXJ5
jpSiXVqf5od6f5WhmzbVLqe+6qo1IXAEA3VhbG+cyjmMacmop5ZRSGvb9sN/VY2lWx3ny3U4NHEC6PtmKhgcXwgARE5gQJOE+ScqcMjNza20cx3EcV11B2uYkcUO277LcSIfLy7Jtj+2xZbrNaj9kImqthkggEALJOE4izILNrDUzcBFh4UXxydzAtDWghSyyLbjQGdh1fUxM3dXqMjnKOYMhqOnK7QZYbkdrpk2pVrBZRKyFbFYjmhnQHXV9bKO4+Jrtolz8zR+eP9XaXdzy+wVwuWIrbK3jmXmTnjaz6IcCAMJlaeprV5Mur2psgA8wqFso2pba5S/d3XxRkFZYToMY1z3Xph9WxpXMVHEd7nxwg+OVl3W/sISMl/MusG6IAWQEd2cguMzHEAGgRqPXTaMjYGBmpDZPk/ksjpJTtxtSzgZgtY3FvGGrAIAejSdilITIiEiS9vv9fn88NQcmAFcDZ+76TvrhZK6qrpUpExmFmjNKcskCWZgYQjnwfD4/PT09G8XBgj1U1a0Mij2gWzBzsM3NHMJfDGzpeT+340SEHIDIlU/H8bNPv8gih313Op1Skrgv2x0R5vbcfwMzUwdcw6S2pu6ZKUZagb1lZy11C22A6A5KxswBV8lGgAZgqI6AWVKX+pQ6IgkEG/kSen3pNwbj3MDRLlb85d6+/ISRdXvw1C7AxQDghLxoIS2cHiIyBHbcwjFsXn0fJAuL0BvAB/F32QCX59T2z5dH0va6Zubq0d/ECyKYr3kIrVYf25ecFQxc3YK8DOZEGKbhfvFR2gVAT2HxAaAVao/rF3ZYVgmtH2nRho/5w7qqoiaGZkjckez7gfrshFrr/Z3b3EoxNxahlIaUupw6zomS5Exmtr/eW/NMIpnHWoFl2B206/B4Nq3uLoyuTcgkS5eoY2JahvI559ba8Xg8nU4RtIhIm23xDC9QJ0Sk6wV2dzQwWA4HlgUibxosPVpL1a7WGdyY6O7u7q/+amx1+tFv/xARSc3MFNzMBJb9FhA0DlNRQAvdzJVOpVF8PCfkZs1KaWVuzYCAAL25h2JDdIqAXMSIamtmjil1z6h1dy1atNRa/UIwy5uH+CQAMKxCQ7Ga1mYaM5uDmbdq7m4KROoeHr8EAAbAi4JtcD/QHaN7YGYhnLn1yQEgWFOXqxf+oYfYxcq7DPl2sY4vN0BoQzBzNMIXLthFEvKMPnF391La0vUD36ZNjKhrFPRtHonoiAqhUrQsd1UNRz3EBc6qbrhtMDUHcIBNoxuREksW1NrEEBxBDZt5U68N5tZmn896Ps2tOWEWzkwdImtzVFUHFCBhszbrNJYRW5Y+u3mZ6zRNZkYIAl6skmPHuM95SCIMqrW1Fow7X4/By6u3BZSvlQTbz2aGjiCMMf1d2cZwEYlKmeZxkkSpy3Wevnw/vry7/V7Vq0MfJK4tamptxcEkgVoAEN1DoPXiiF5bJYHRty5XrXGAdNwxoLaGjoru1iIpIgJmBafZqz5zk9AdycncSqnTNGlttVYzYFjCc6D0YlFu+21L0yHE6JesackCLICB7n7ZXVwxI5tTJSICIjE7gK5dTkK/xP98bd1v11MibsZEKzpRsJDiFy48EcUv40sKs7uHwO/zZ7r4Gdbzet0vLarkSIDcnRxtodVGG/RZhYaIjGEJ/9HPMidCEQHyJSOqCqGhAth8+eWmiUdhmkIArmiAbjqX89PRT3ieTuPTcRzr+fH89HDW0fe5I0qIBEDzPCsTSY67Nc6Tzqq1+b4jVWx6Un16eATzhOBaBDyh9yyHPh/2Qyds3lqrSnm/30/TFBy8GgQJx604+1qACFc59GUPQ/T81/kGrjLlEKm3ew1dkGgdOajq+TR99f59P3xiralVABBZ4nFrRTiFfpaattYwGsdIelGMRjPA3Xv3sU1mllIeuo4Bx3H0SPsTcE6pATMQslWzhqDEnBA5NNCJOGrl6TyeTqOqkgNzijKXGRBRL4ZTl9nHJTUXLjKlbT9
8bfWjMG09mwvreSvLAPfC7xcuXhAvo5IHH+Br2wLXHurX9kp8uL7vAwXUbGmMAIBfVswf9i5iS4WYfaTwcUYRLG4Imzb1AsbOxMwoC5ZCaaEEqDderQufJ8gOIgLuRqQBg0MIPF8nqUMUA2zWprmB12nyuY5P0/FpOh0nbuLCQokpEUqz6gaLSCqRmc3zzEC7VK21Op2f5nKazgTGCNBax5ABslDfpf2Q+r4LQPtx9t1uN89z0KZDgEk4XYhrfIC2WsZesIg8xyNcN5dzgC7yEzMiSiIi1Fqbpslae/vuvfzt3718+UJbsVYlEXPPgPwsUguu5uaqygEPYVbV7R2taUAemPl0PiHibrcTSQCASM1UF0Udg861oZsyMyM5tCH3nWRmYZa4blptnuvxeAQFRMzZRIScyQEQFXVbS9vSQsTW2tdGSbG02gUVMVY/ADQ3Wp++VcOxB0LxwS5AOqoaOwuWZ3+w2mULMHF9v3Y0A8ByC2X9emt5ZEs1srRBN4ZbGKo+7xy1lDIw11rcNCfBJEpQbCG/+0XehaHbIezu8zwTsOQU1Ls+72JktuMhtkSfu9vrm4fpqbWWWfr9Aefqp9nUO+mkaUICNwirKgKvrUzz0B0mKmyiRU92vksPu/1+dxiql2aO4sPQDcPQdcPp4Xx6Ot/sVTgxEwuCGbQq4DkRmQ0p3wy760MwQxIKIuJHu1e11k8//TSU4uOqJslbUrTISW34QjVmjoNiGAYAmGuJG7lp32LABiPJrqZaVQHRuzxg763aLz/99fVfH37wve8dT6fDbjgcZJqm3b5fthNSYNHyclou08N4iy6lrus4CZhP09R3u2iwlVbRAYWHYaiirgBTqaquK+K61ToVerELxQOmJISt6DRN43nOnOc2t2poDTP5YqPhObGvQyffEhhAM+i6HA00Iur7PnoJYb8FiEAcOBsDIKBaNCViYaJA+y2ML4AF5hkKOhGszS4W/ofp6DMn+PKgudwA+OHj8sS4OFnga7/cNnGgxjeFf1hf83kXfghGB17zH1ngdPFPRASEoYa3xEt3ctj3QymlQSVzaG5YvbXSRlZsRmTh7kROgA4dS2osYavc3M3quTzeP2FyS0oDSs9dygEqZmDbRFWbWm0EnphYgdUJNJMPvRwO++vrQ9/3KAiEw3D78PBQSokiuOs6+LAC265wfP3AfnkYvyGaGa0AfbRAjaJHBzr082AZf4JjULkUHNVLM2Te7/c5CTkYLJJpS+9hg6g4OAKad5K29efr4B/NUg5GUU4pARrMpSC4e4y14+hqzaw2UwXXMreUEndJgtiuAE5Z5DgWbe5qilprRHckesbMb0vucqU9H/KrQUFYmj6fCUS0zW3XgyRcr+MVNouny1V9+fjab8R0ORe2T7VOAGhxC3YI4UqAMLK3/6aC+jKQb0dYlzMTA2JIeRmEcc7C+/jgQbiYQK50R0qEHMbaAKExSOSwFOIBT+okAZkIiCNh0gZtOpZppgbsnJBATRWMkAC6rutdO+nEsVafbT7CERFP02O6SrJnFu9TBkPCBE5optqii4XgmcEYqpuVaUjdrs/7w+76+vrm5ir1CYQMQVz2+/1ut4tbGAejtudzHOCDDQCARJQuwpK7B/459rldCG95kLDX5y5FMxqAPz4e56neXB+YogSMzGeB/hKsvnGxrwCYOSmHho+qolm0TUhdhBbFfffGzRGiteqgAGCqrczzONV5MvWH+/thGBglnOCCo2cKtQaZHc08fkanxOsCWgsAWFNuXMV8LsnyW7BYhp4XMAdftS1IWC6YjHGGqKpbg2fw8xK7v7YdPCbBX9uL2wv95uJGRPuwmwFfC+cAcLGbt09vCMLc0B1iShcJ/Aerf9k2cQ1y4pw4UVD1ff0MROQWN81U1VQdlMwFuevyDqWCnM91fhy1qKEbBFvPlRET97m7FS5P80NKZ52saHF6cMUJ85hkT5Jg1w/kBIYE3Mzm8QRELkTQwNRa1Ta51he
vXl73+8PQ933mnDiJEoD7+XTe7/cfffTR9fX1u3fvSikiso1A4itenqX+TFZZvUYizG4H8uqVsmwAoO0+WuwTBwJ/f/fwcHx6+epWGFyLMAtSXQXOKFwrHbZCYxvnL5oLuESfQPO3ZkRNwbV5HENGa3NJtZQyTeM0TWA2zqWWAMVCzj1s7RaD596JuqOJROsJ3YMpjlvkRcRgE0bFvH7ZFkERABAIKOzsGBENPC/Kdrh89KALqG2fENbhgKouleOHS3+Nqhdwt8u1/rXHtkP0wlf9cg9cnGvPp3z04w0gCq+E4NZaQLDog1fe1jet0tsiEoJ6tt5+W+dEyxVWba1xcwAg5p5TL7nrvJHMauAeyFMAsEX+hpn5uu/G3WmXuiMc59YaVEezamM78Ui5432/73Nn1dCIILk2JjFXbbWVCbQMKeXUvbi5PuQ+TKmIyIEUwWBDstFzrHL/EAL4nGS6e0Dt48Ec4N+1hUrLVcUwIom/5ygGI3uM12QHrLWeTiMzd520qaWUADXItYy0IQ5h7bvXIKnEpnKLXhww16pEhZmbm7u21hTWuSwioKtqK7XV2kpxNRWdpolIRHLmzMwpZeayDkYxSu2LL6tbE/biVITtgkSQjSgOq5kp4IKNi0yYo1G5vsLyjcLAd2OzGMCFttW60p6hEPG/8g8u921l+3o/bIOIbgXuh897LqDxA+WtmFg4YyfJnVo1VzV3XM8HuOiFL2cfPy+htUcSMA1XVVr4XwAArbWQqSB1rS20oEDNmjILtkXzZrkQqmjMSH3Ku67PkpQUzb2qmiq2uba7d7zr94fdvkwNDBml7xIyKXrRmojS0F8Nu+t+dzMchtztd7uoIC06r4C7Pp/P5y+//PLp6YlWgb5W9fnb4QdKOOhuTasuQHZmrrriDtSfF26cFAgQIgOLI+5yaxEx9f04TwCQc4Y2M7PWsm6adbKpFhsAAco8N1UIShICrLP/4CmbWShNxircNrOqmjV3RdNg6DawWlX1KUCvOXeR8KSUACiOl9XEnswM0J6nEBcFgK3ITV/B+tE3s3VOvARKek6KYP3l5odnZmkNPWt0WC77B42Ziy0nazbu7vZhl5QQyUNk3Td1E+eLl/jNeEZEbkYfwigAgKMqB6murE0/zP/94rMuUlMXG2LtsS53kZhEhJ6J/WxmVZuV6pxyXTBIiRhgmdkt17dWY7TJwTyn1KdcqbqhuVttIDi3+nT/9DA8YHObHQwTCTNxkiGn3WEAABYcJO8ks/oud0EbhyQVoLYya7NTOR6PD0HwW6/Mc0m39bPXB28Au5X6nFS3Gfl6hq3R0cFwoX9DXCwi5OWAfXo8TWO5Ogy4Yd3cwRwYPGDYqh4rcm36wZp3rdEKVsh3BwwBaIsb5KsQZ0CLY27jqssx0vSI5A6h8qm1pdSpThppzFrKt9YEL464D9dia62UEkn8xtlV3fR1LrtGoOvGMLO29pQQkVYes16cMxeYQ7i8ngDwzAe4/O3X/hQuJhd0cZxf/llcRDODrYDb7veqdAvg3MI/5jfe5sP2iIKLeySR63b6gLkX8h7e1NUs3DzVgBMAE2IniYDcn1VKoz9htbbavKkQ5cSJWN3NVE0FkqsFcayTjp3C39BaS5l3u/5wc537FPoLXhpWzTnnLCklJWytTaWM83T+7CE+Z9/3p9NpmiZE7LshVOa373i5N+IHXEX4Wmu6zWEuwCnLXdgY9Ftjh4CIWrXHx8fj8Xh7c3AzAHa1ZylmCFaWhlTHFmOeL/56y3LOQQwAhlpJ3aype1zmZUyLiMsGYI4GZ5igxaRcKNwMeQ3b/ryCm7o8E7W26wBrdh14/dgAy7PcLrP07Ybqmg/7mvPgxsu7yFx0AVN5dIcul3Q8pJRpPZoXq4T1yW0bhZm56pIv2qYtCrjKoBA6Ypy0iOgLpZmBENGokKNrqScDlp0KeOZWWjVLhIlVwBkxASTAHhJL13VD1wdeiAHcPEh1nCSlJIkNUd0V0Dm
9mrrj4+P8/r5N07Db58MeVV0AwVu22pTA3RDMWTGpVz4jtGFo1y+5ent4Gq1ax1RP0456npO/h9Ja6jsgm+Zjnrr+andz/frNqxe5I9Myj09lsl23i35l88Yt5dHhy/H01fvpdAambDxwstJcTbqsYJSW6g28AgTjyQl90tndKREnntvMzChItt2iiHkAAOEiaCupYsvL3aBZE8kKkPvD1fWL0yMdj0/Tsd7c3HRA4cFD7oLUOMRuFYQ5LUiBaZqI6CqllNLJS+73uM9uBqaZh9rmeZraqUFDcTJzAENB6hDcB702a6HKRYrNDISIGpj3OfU5zWWa57mp9qnnjqaxXay3pc3oDm6gzWZbDJoI2d1bVXYgAo7sBA3cODEytbk6OSCWVgNzzswgclfn1lprtCT/IrRcxOdaa9sAiDG8+drs9sO65Dci9XO0/v/zRLg4Op4rj+ejBlPqXMCYnALaueRuF6+zoAZifExEYK6tInhEl5gut7lYUwIkzl3K0d7uum4+nmEtldBjTsfxT0PuMmc3nmsrDc42qRozR35aWp3nGULMZm0tB8JHhFGQwRkJHSCYqd7UfJ7LNE1hHy1djlDadV1U8HphvIDP/R93X3pacWNU/4Ep6YdXckFPbZFvi+XzPN9cHwCg6zrY763NvhtgPWRoxZa7ti0F3zC827Jg5qurq67rckru3nPSbEez8/G0/Q2u/cqUEpiVQluCcRmjcfUikMWqYvGauNRh+FpH8TLD+driiRJum6Yj00Zyr61uEi+4uiH95uNrK9nXhufXi2DccJcfIiAulvjzz5d7AD9Yu8/vAWvbLo4Vd0D3kO5pQalZUx0EcEMkj+PaHcGDTtxgGXSbNVAHcWRBAje38VTmcfK2VD9EhCIppUoUtgtlqo7hvsq11swiIn2/c0xzbdMUyMdCSGZLBy1w5+JMRKUUhMUqVIhFkiCYahkndHU1b1UbTucyjvM8z9M0ZQQACCvCZrp6s9FlabTYgi+JzoIDo1XF8jdX//MPGNzhFVaxIH6xlAoA79+/r7WmLptZ3/fu7mq0skbNLEDssYaif7XdtZgNd/t933UiwoDScZ3b6empTvM0TW2OqeqzfSqYiQDAcpKYN/f4Ckvuyhx2l4toSK01XQyt/sGt/rWF59tWufDjCgJW/M3cartQ5Cf8utDJ9k+/uQcglAgu87B/8Mlf+z3+xuPyRbYdvIScsMA2BI+xCJhCXAtVUG0uQCRJE33YWHKPnQBh1hBrAwESQkJgR1WDquU0zuPcphmFvbZWKxsQUd/3wzC46pOdWrPUZQCKZouIEMluB8MwdF1HwlhDp3G51nOrOM9mjEwPDw/TNPnKrxJCBVqopQqttDZZrTaf2zzPWmop1ReaUjQE50CsmkUmE8Z3EFn3ZTsYVuDX5SkBH4Z/gBWwiMsGgjWdYKTj8fjv/+6nP/qt73305tVUy9AlWAapy+vYBtliyltOHE4tK3YgRD0Y1vzErJU6z/M4jlZaLQXNN7sXZk5pwdvAc6HYEAPKzyIisIDn46JFcr/Fgi1rX0PB1x+XfxBLUVVDFSrerunSqoozCunry/VyA1xezOcNsP3/ry3lixPtEintsE3mfuME2J643cIFJh+6iGirya03bQre2BmYLDFSAhLk+CjmDRzCIxsgmskugMKYSTKyN/PSWpmsqJWq1RiwFa1UgZABNZpfACi8tA2JMUnXYUqJSFLyruty14kIIDY3xNA9x3V/VkSsWh4fH8dxvLndE4qb1bnWsSKStjaNZTzO8zjPk56fTtM011oDDbWREiNeblHKL6Za24W9mP58vTUOHwYmX0sCjJblktEiMz89PWkp90+P3/r2NySn0lqUogDQWouaaisWU0qllKoKAKE6jGEdN82dJJDkaqW1aZzmeQ4oaSjfLEQTpiiCRdjMWIK3hGtvKQCFDOHKSBRJmqqWuf6DC327Vl/7zdZrsbWZAfA8CdkSelx7qSvT5IPHbx4C2/X
/QBcItw7rh6v/gw/kzyfA16ZrX3uD5Yf13c0spphLJaDhpWyuHMa9SUSIZ21mzZUidhEAEjAQmAtjzykjQzOdJ5tmaM2rQgMBzMgM0SrCSF3i4xk4UDRPTLq826VIzHKGYb/b7Xbd0J/mGasuaCre1FXN3dno6fHxeDxae4VIrbV5rqVoltSKnk7jw93j+TiWuc3neRwnX2WQN/kNuOij+5r0bxccL8vBy0T/4ofLA8HMwrxuu5KLXlpsWpHj8cjML16+fP/uS0dwRI2bsHFr1+lBW40dRCQ0CkqrcB67lBOLqdZpns7zxpAkBSKPag2dQiWWGFiQGq3LRqN5SkQxMo3niiUTR2i+Hjz4rHXp8Nzrw69vgw/hZ35RJPzmE+HDaP4PruHtD543wOVzIm/52iHwvPovHl97y6/lUR9sANuYUGaO7o6GYOigYA4BW1PH5kAG6tgs2rNEhA6y4kUzckKCZjqXOo5WG6mVsXg1Yu6kS5xw0RAxAKi1xlnZzIo2Jkgpwf+vtS/rdiQ30osFyCR5WbeqS90a+Wn84Oc59v//M7I1ttVSb3chMxNARMxDAEiQrCr1WM7TpzovcwdiQyxfqHl2jUPnPn/6+Nv75ZqSvC9GO9q7qtbuTWJvr++//vqy/il9OOm25eWac5JtSet1e/nl9Zeffr28L1ZMRJtTvLnhm5Ry3ERofkwAsEHjjzPaJcsjD/jJpfX3BQCE/W1FZA5xnuf//X//z1//9rfPf/j4fnkFANvKqKUNwdsiSUuZdOrvNJS2bbleTdWKlJTLViQXUw1IQobMaMBEbsd2+iMi5trgqFvnqlqKMgsORDxS0bj/aBT5Jn2gEMZx6CYTPng/x2fdUeOddr3RAL71e40EfWvn3LPpo5S62QzNkyHcI2EI6rVeamqKAtk966oxowGdAmj3AxoauC5gBQYOCpYlLZtcM4oy0nLdzCzOU6AYKKIVVTG1GGczAeQQUMxSEROJ07QsCxAyR+R5nufj+en4dJqW67Jl8whGgzMgAgYUhev1enm7rGtKqWxL2tYkAm8vl/W6vL68v768b9fNO6GjUUrJdYhjC4CRg/u0EaiOZjQCM7yNzPcpGGXKKPPgVtyQtmbxZiDq/Q//8pe//K9//8vnHz7RFBFR1tzJyxBQFRGLKjXjxFMpzcwLvi5bSrx6gAVEtdTGzDsxNDunpdmA4w85SYxCvXGmIToWCZi13KRbvQeD/L0jU23FXEb7URtpmqivB1SV742Se51wdyj0ajFXhX2su8vpTsXcSfp+0+7poz3vAswsF49okBmqmEhSMVVxNFlSs6TbkpivYBKmCMfDRAE4AIKJAmuceZ6mSFy2zUTzki+//JaXLQBKymDx8v62rQFM8hLPT8fDYRbNAMocPZUgxkghgsfpRE7zCYiv6yYA5/P58w/frzlvqaiCS8S0bm9vb5oThUDEWuz19fXHH38kAzBZL2m5XkqS95frr7++XK+rpCyiaMTMaqam7iQ5Ho+5FEykCJ5kBgBecs4tIcKTLrsowR4xDXuM0n+vyYUBW2ZBzfAi9Lo5ymXLOZ8/nX74078owh/++MNf//pXVFUwEOg4GtIg087n8+F0VFW3zrOU9P4WpsO2rPnqxZ8IRirinjkGn0eT2ogAQwhq3GmmNilqIG2dPKg3EGhF0tQQMbDVrffz4VaQdwr0+hNrDlxHezazrDUa3TkK23rgi27WzqJVA9wxxChsvrH1ifmHZ/pM7ZcJqCoqaFE0RSRU05TzgmigucRzsKg0ITB5fY1lzZoNRFMCQN2yZQuCAQgE396Xl9/eCM1E8OOH03EGLw2jCqGhYGhG3scUoOau9Zg5huPxeDgeD6cjGE3T5LkJdPGSbNNccs7LdXv77XXiCKrr9Zq3lNbt7e2yXrdtS1akgRMrINbOMffR933DYYNBiPSjMID29OmogfAQnRoIRsMJpmkyEGB6fn7+/o8/fHh+Xrfr+ePz9bLhmHvTnjjPc5w
n71sBhKVt4l4sX2wAApDmokW8k7y/zS2FuK2oiF7U3Uy1UhDRvfVE1DNAv0hIo5Vxd/RrY/goNfqfXWTgYD3e7fTzd3j03/PUb7/f3a0b57mEILd+zUyLiimoMnFtU14kLxkNTCJekoYCUUNkA4RCWXOGHInLukRDzAIZAvBMIQb8OS2vr+85rTnnQHB+Oh6i5JKmyetCzDwv1jELkaMXSpsiIsXIYT6dn87PH96WNYTpeDwyIAC8v75l/wQXJ7m8v10JmAxSWqWU99e395e362WTJC5VoALNhhovYjJ1aU3eSsdzhluHhB3maLRq+nTobe4QNAbYrfU+wgYOzG9m27aht1w/HS/p6m02XfSqqgxts6bD7EBACuZpSDUYl4vHHM3MEX7ylqp/1m5SuAYxXV3+LdJalVgTt9AT482MBmE43ufRjh+VwN3g9B8RkVrqTf+RhlZI3Z4cL++Eih4JfqT4b/BAf7wO9ZPjoUeWqEeNdqQYUVALgTgGAcnqQS4xJFvVJtGYiwIKg4IxqYqRpmsuqkGBBA/T6cN0NNW/zYsqvL5dzOTpePj4fJ6YcvGIekFEgYqVAUaApgiSsxgIBC//5RDiYQ5TPB5PHz88ByQp5deff1lr+hGboYita2JcKs5rLtdL2jYRMS8jVlRzW8NEcY+D1iHqlNIGapybu3G7G9I7BxEQEnqKPCBi71KTUgqRL+v15eXlt5eXz//y2dsN7sgIVlHJQggdXB6Z3JyS1udcs3p1tZkJgAnknEEVm/sWAFDHjzEkIEbWzgDqtr7vOAMwM1Zos50wOn1/UQN0Fng81MmXHEZ6OB+bHX5Dey2X7vER95Fgu52qr213PDcaTqMZBwBGuOPTK5IBasWaxgolEK1CXACIQlJdSw6FBGmyoEgBEDBrWq9rVEDiGcMpHs+nD2h2Pr/HaSqlXK/r+/v7uqRy9DZSBcnciaY1H8uYKOekAGKgaBgjUFZVp1ZmjvM0U8VJRkRTTSkd4pRSWq8LAQYMKlJS1qxWM4XNoV5lkFrWPCTd2O0z0akfmz//cXZhYJKuyvc4sSdY2g4viQiE+PT0tKT15eXlf/77X/71v/3rfDpe1sUQgUhHQUtYYThiGAWkIvgsOLe4hilZta0Gd3l8KyIRcVw9qmqHanMIcqYYAoSwhzsQ71f/I/nd6YDOB+MAdgaoCPi3RiN8SRzfmSe+3XSIeaTvb/PAHZN9/Vqv64Sqsvx/6oNljpSMZlZArFhS3aRwJoVgpGSIYAAlixUzxXAIc5iOh9M8H6DI4ekUD0ekkHK+LOu6rgCfponBCyLM3POC3gfKIDvwGJKBZSmWUnIAGEJxgHnec9C1otG7J0RELEafRXJwBwZkDow1bd8j316EQFSzIqlDTw4z0TXACKk0TsQ4I3eHqlSs/ONsBmrkdsiatj//+c//9vZvx9O8blu3f1wEimloFksF7RLJKlkFfIGUZXfRisMsKgEK1C66XeQ2AtjNOWhp/S71rQO2GY1ZHqPsh+4pfiCtb5Ofj4YnrfTLHxdOj7f1ra+Db1Ihfs828u74CwyKafy2Mb2hTzwoVlwXUTBABAIUBwrbpHCJUZE9a4g8YsAYAk/Hefrw9HSeDh9O5wOFNS+H4+lwOIQQtjUty3JdlyRlPhysNdaofgkAVjUSNCAm5GAUpal+YHL8+GVZNsBlWXLOUPN+GQBylmVZDnGyMCMwgKP9EGEInunNgTmZ2ZoTQIVdsKGFbR8xvJVk2iKXj4oChnKQfpXTnJlBkyfOAIi4bRsiTtP0t5/+/ve///3z999dliulpENLc0Nwv5NXlLuQXte19/tBrEiMZJC9tEoUb+tj72afGXunFSmacyZyYEMQEVNU6PFartGVW6KEwYs4UuoX6biPQ801aj2gRt/OnaSHL1G///uFNcDvEf8wiCX7kgE3nLcHFtCNY/DyUwJQxSrDQFUNVEBKqU5oADJuqH0wTzMbPB1Oz8+fPk7HpxjRe05N0zT
PPEVYwVNWcs6qkwt+Lxx2F5sjzxzmyCFgnASCIZhUWyXGKMU87O/NOq2mM1nO2exqohNPIUwE6FAx5G1sQpymAGqsaGZJig5AkXcJI8NQID74LuBLkRobrKn6I+xQGojo7TVijKXkEMLz87OIXC6X8/NTKYVK0TGVv5npveYwpeQo1vVBzmlaDTlVtVqruQv+x2/xZsS6N6jzOqQKBDae+UhCbZz3Rf/jCXdPHMfQh+VxnO/E9Bc5YdcA2CE1XSy15pJfuqn3BKpNzAaurYZ+s2zd4wFTdZ1ZMQVUqw0STU2aCPQbEAEgEaSVDzNls6sVr/2fTSBlSsneP38Xz38KUtJLyQR8Vfg+nj6cAmmWkiA8XV7z+6/b5/MfS0mlZDUDQg6kqIYqIPLdeT6djtOxpHK9rnbZLCfOQsQZdUvluia5Zk2QNQJA5jcCCJbR4DW96Rse4xyIeT5GYJEcYghTAFRNVnKOGKFkKwqg5+PxdJ5njG/v7+io74AUo3mvPJ9IKW4xt5BOa2tel07mhEdI3oXGABTEDDIhESFQQgTUQJFjnBDef37/H//93/7rh/8CbzL9lNcVAZgwTBgkZ8sGZpEZr5rWBZmR7EgzoKWSzYwwMMFmtQkiGAJSyoUxiIqal7iSmoqhAhUVB+hGqP1iKRAbKQiCoqmCKaIiGjGG2p6+QwVX/C/mHiLo1hpVrDS2gU+s2lR6PB65gUE0u0tVFVtVYzfJnOs9qwZqzQo5tBHAQxzgkXW+vf0D8f+PrjWz2s22bb76dBklKUFEDoxoRPzhcPZs9azJCEHJzN7e3jyB2VMac86Xy+X19TXnDRE4GAVU7/U7h8Nhfvr48enpaQ7zel3LVhI6IpqLEDMzaKNZx7G/q6gVB9uYEdEMpikEmry3ZJEEg1aEXWabW9t+G2x60H63poUHmfcoKYefLef8+vr6008/Iejb21vA+4RfbSVXvjjx1KpuG4wA4qoV0thjBY3CbmZweCf8ynbzXGqm16N4HnMo7n4EqPqnOg+aV0dvKxCJaEcMaLfqUHxmN2Pub1ZNoFHb/s7NBnP//40HKu/eFhh4fB48b9ZA2SaccBbE8Pz88dOHTzEEoWKEOcu2bT//+OP1eiWieToyotvxr29vKjkEno5e9yhmcAyH0+np+fn5fDyHENBwvawhpELSfXNeD+C+i9rLQZvg0Vxb40xKRIwQwjTHGAKrFlgF1CTvHXjQXTMGTZKR9w+sYbI2KQ+R+/vtK+YTNe+bk5iFEAIjI5QUtm377bffYiBVBb65EKC2OHC5SyGgVsgJ8EjeDtuzpx+rKhr5L2gwMgm2DIX+FOd8d7/SUM3oVmXk4DXPANU9AeD/krubnQwRXdU4qAj4h+4DgjiUWOzPRawN6fz9/fxhbbAvX/s777hA/di3OWG8/p/kgfqiYPaQcOGSOFuWBRQ1AJLNp/kQQlABpmiMl/T29vr+888/L8vCHGO0hpkOpZTAu8eN2XUIhRAmnnzgHOx7ittia96SlJJzTuu2ruseWvcKHgBQsyIFc5mKmRHhHGdmDr4GRZYQzHDbNiggIsgV6NgFphMEEHqcuI+zwD9mALiV/Xu0vzEA7EsIDCE4KO/1ej0d52maOlAPDnD2HYOfAEANeHe2OlxKL3zBRsGdjHo29x1J3AlXfoDIriAGsBPMKPL7vN/deWCA3aHs1zoPdAbwJ2KrzvEg9E1KIpA9aoD+d3c43H3MF7d/XvyPt4JBctThIAMAFcmbChUgDvzsuZxFyxQnRdjW/P7+nqQQBffhQKlIMiJyOB5FihmGEHgKwFIzH2udB0TiOU4hBMlyuVy2LeUt19a2uSDUWmxC9KwMNBDNWpJqAYDpMLldVEri5tSXlK149o0SESUGJlFVT1VAUO8QVR2K1RL6xrD07YEBqFa3VC+b5ZxVIBA6cVyvVzA5THPAG093t4wdvxpFkAEDU+AWCLMOSeKeUufkkvZyTZffWCsu7oMD/qDOAE3YV03
ioHePZDNGJLo4H3kDBt7oMl5VUavQrCSEwRp6lQ5tu6yu6u9HdTeBHgf9axPzKP7hP+lLHeeDbKf+nQFUvYFuEaUIoHSIc+TJc7DiFNctX6/LuqYpHvKUwxRTSlkVUXLO65qOx6OIBCOKYZ5nQ52mQ5gnRmJHE0E2Qy2Wc87rJqmIiEoWyQZCRIHNFNG4LbYAREspotlA0ONfaJYb+pIBIhtZjWqocCkOKcnM02GWuroQay1MAMCk1BIxAOvlYlChYnwfybHPEPbAMNZO6la1QQhsWmFlPcNn2zZGClNrAdFCSj5lFXZBFcQYYiB0l2gQ7GvQGGOk1ivRco31DdSPiNB7tA3zyAzW3ZStO3dVPioKpg1sy8lGTRHqot9r0qAiFA1kafsGQ0cPbEReCQh3BJqvkeWoZu6hEcc/v7b9fxH/XRp5b+zdY0gGaqUUQyxFDAE1RA4EGLGa6iLi2Dvblr1zBkBNuPXWgtu2bdsGqMwcwmToeWoQQti2xBwCBsllvS7LsoBqjDFnCZ4ng0iAkVEVzSQgRvdOiDmSoKqKlVLSdDofDoe8bmlbVNUX4kq1cxQiKkJADCG426c4Uo/nzfV+H+Kmal3jPYCm+ghjRd/YzYDeWKQKv2maclJmPs6zuwqgJeg54TIBahWKVrOSELEa2Z0WOzF5QqszACKCoi+NvE6oT59pu9aqO9JxcAeXaF14VNArAVMz1RDI49gAYAZSqlpAIgQGAxURUQo4WkeDGVZJ0e7wAm9jAv1PRGxLixsi/4IX6PfQ9D9v+UDngWbD9aFXERMsgFZUCQhx4mCioAgKppatvL1eLq+XnLPV3oV1mgHADItpKSVEojARkaqklGKOYLS8XyaeIoSUyrIsaVnR4Hg4pDUDYaxtiQGUGIsX7xMgI1WUUaj/pbKdw4fDcQbQXDYzM8JpmgQrktT+PYjA5PZPFXjD0f/UcA07+/AjItTGdaXzoYc1sNnKTjTOAO5q9GwId2Y7rasZM5PVELXrPW5wze64HCe9mjr9W2AvJ3IdWH8G1zQ3Vn4/s99Q9ZavmtLgAc+nXz4yAHzpPl0/2LAO6dRf7+EM0PXdyBl3jHL32ePO+Kc1T8J41Jq2EtP9I9uIMLv9fGNKhcDLWoqUeHgKhJZK3sohHtKyEsE8zznLsqzLsr2+vJfXiwNMLOuqIofnQ4hRVbPa0+l4OBx8Xk8fnhHhx7//7fnTx21Z81Zef3u5vl3zlkopkmSOsZQNAQ4xIB63lFSDznsjhhijV7pkSWLlsq3HtBCBAYRpsmUppYT5kLd3IOSWtQkAGDgQFlMtGQC49bEzgMhMMWgp6ujkgQGxYp7WbO06Mr0Sdq7dJuua0gFPOeC2bd9//73ktCxLdQrPMXLQXGNeCBhbyZuIxBhFhBC9w+e6rkUEEaW2way9Uwm8AxcVq+36YozuZqjAJFQLFVQqI23bdrlc/aoYo0hVic5Ua9qIKIApWADrA2VmLnwMvSRJPU11IKfd0O8PxcH72mi9RrFqdnfzK/XUjMYhD4vgr4mcLpu/eM4de3yNbfoNK+sPq15sOBmdd7H2D3Zof4DAbICiWkRzEaScbb2s18u6LBvk2vEXAAJPrSEPhhDASETMAjKHEMRKyfmXn35djmsgXi/XbdvMICABgxlOLDGGkIMqCHPhEpVKBvNebpCLKREJqICGiBARY2Cv40cUBVWhwN6os4v5G2FBiIZEJB2q0io4J4JbEW6s+4/U17hO/16UjIgO7uZu7zqqLROm93t0mIsQgmsGF3xVG2j1iRERB+IpIpOlxMw8VQj/Ztjs5n7TWjXK4ef0zk4AO5HgTa7rjU0xrm57hf6jcugi9Wu2xq5hXKk2Me+1mp30+8s4A0BbjsMXTaCvUf/4yyPpP2qMfvKoa264qOVyhBAw7GJJVVUT+bIATEtRgBiZAPOWyhYkRkFNKW/blrOoGqO3pDVEnuI0z3MIk1nF2HGM4AY5aCmVvF23ZSUiSSK5kIE
RMQIFLlHnEHMsqhosRBEEUMmGIKrmhs0U4jFOp+l0eppPh+kwoaBhRqZiWqRMDjWF1ZZXBF/XWwtPKgKV3UHeZ6hrfxxWRHe+EUQU8ZLCngda9YQL70B4OBw8m9W01B4ZzW7QlqSJVCmGmWMMYZqQvdem0i1EIRl20JHB/NjDahXvvq0BYPBIdlME24t2W8Od3z5CIdRlvQcEVLu8dwL7Qn+7Jhf2+EC3AJ0BxlCGJyUQUbvzfp9vMcBI8SPt3tF33xmP3u7cXNK5tpuhFAMMNc5SHLJ4Z2tTRbW8lZzSlIMwmxgATdN0fmKI1+2yrbhh4Kq1YyCrIXpC5y4sLSkyhKAKnm5EhoaARoiGSIExhBCJCnMopTCaMcdgZmomZDTR04fTp8+fP37+7vn5+cPTcySWLCImrrjB8x2aMAO3dRQUu/nHxGpGzR9CDWPncRnaZW43A/Ah86pTtyOvTIf5dDrN80xEgJxywYashgSdY5D2OfUdt15ERFLqMlJEsFIqlCIuVr1MrFM2tGxkHvpvd3Ifqb995heKFTuZ7ZrnluIrJQ3emi7Xgfd6y1KKKdqwAOh0KCJuHfUR8+1bDHBH33fj/kUe2G+8n7nv75Lg1rIad0Z1QYbFvDRbNZeSSlo3wpBLUYEYp3kKXA5v9nK9XjElX2gCAFFIW9GTIqKApZSAPGyOM0/iFEuEAFrEiEAdYwoZa0IUEUViDphLVBBRUbTDPJ2fnz/98N3n7//w3XffTWG2Yvm6bVsGwgLmlOGwKKIKIqAChAZQTKX5W7j2qGvLZSYc6Ls1q6BhWMDMk2aRca+uclmoqoAQQ3T2Pp1OPfPCzDj0Gtyh6pygJ9KICIogVzGZhtxpRATp1shIBn32audmAECoaaRdlo0btprmbAYIWkuUDEzB1K3zOuljx1EAHhh+p/7Rc8jcnDymqh4J7sqnE2RfHrQfK719lQFGWX7DkV+PA9xwVvvs/VbjZ4z7t5yD6D1/iLF2zuprmnVdDU0Utqxp2xDxdDpPVnLO89tccvbQrapi4JxzKVpKocyqClQD4W56glpANjJDJQCOMaUSqPbmmJvHQ1UxZzIzE0NA5sOH48fvPn36/PHjp08InK5JxTgG74Xsvk5/RPHll6C79gVMwYAptGWcZyD0ucSm9BCNCCvMRMt/hkoW1PyG1Z5BqNeGEErevBIfHeKKhqYND+TYH1eK+ctLq1+xB5rrkgsAArXO1aqImBvYqHuBmvl0T4L9cjf96D5Gu78qta2+ZENDGUmrk/jjRz068K35ZvoiGKDFT76hAUZdM3ICfiUOcGf8DFIK7k7YCb35ce8Ev5/gC2QAQDPJeVtWNRIppRRRzDkHns7nc9S8bdtyPoMCWs08CyHkvHmmuxGEGQOSgZVSqJZ7ExKiQTFEdEQMAIBJRWRWVUUIagBwyTlrZlAxY+bD4XA6H4/nJ57YFCkiT5FjRCYjtFYV5jwgYKBelwweAGaKMBg8I3E0Wq8j0Cmgz9841KqVAZiYiEJgZpZSEXnNLKU0RVbV19dXT+4IjHOIHRvCX3JvOJcs5bxtG+2oMDRO1qi0/Qv60WpXUBVVImJ20/KszywzYzFCVICae6juNgZHT0HP5SEzsioTQieeG2NbWypEB9v3R/Tw2UBOPaERdSA2v9s/YIDxbBwM0Hu18CUSfzxnPKG7e2GU/bcWITOLqZmo6rquAlRMAMmQEPhwiOfz2dbL4XA4nU4mZiKRQwiBp4grllKWtGEgniZEVFMRAwa0iuynJv0FHLU4aixRoxSHNzTAaZpQMYNiUY4hTDHM0zTPouqOngonc1v1UtnAkZ/BPAMCCPEr5sHI+Y874zmt7wP0JJle24XtZVS1mAZGAPjll1+2bVPVeQpwOPrywEW1I694hbFHD5dlmbSiOPY5AydxHcGcbwRwl/RuYIgIwGiI35nQ1RlwJyjvlNWdyH+knxpqVLXBpYOIZverC2wti6rob0f93/8Amq0+e/RONFk
AAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "import io\n", - "\n", - "import requests\n", - "from PIL import Image\n", - "from IPython import display\n", - "\n", - "url = \"https://pai-sdk.oss-cn-shanghai.aliyuncs.com/resources/images/11563567033_b822736d84_c.jpeg\"\n", - "\n", - "data = requests.get(url).content\n", - "\n", - "display.display(Image.open(io.BytesIO(data)))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.predictor import RawResponse\n", - "\n", - "resp: RawResponse = p.raw_predict(data=data)\n", - "\n", - "print(resp.json())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "在测试完成之后,我们可以将创建的服务删除。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "pai-dev-py38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/predict.rst b/docs/source/tutorial/predict.rst deleted file mode 100644 index bfc2ca8..0000000 --- a/docs/source/tutorial/predict.rst +++ /dev/null @@ -1,12 +0,0 @@ -=========================================== -模型部署 -=========================================== - - -.. 
toctree:: - :maxdepth: 1 - - model_deploy_container/model_deploy_container - async_inference/async_inference - modelscope_model_deploy/modelscope_model_deploy - huggingface_model_deploy/huggingface_model_deploy diff --git a/docs/source/tutorial/pretrained-model/pretrained-model.ipynb b/docs/source/tutorial/pretrained-model/pretrained-model.ipynb deleted file mode 100644 index 5e6ad2a..0000000 --- a/docs/source/tutorial/pretrained-model/pretrained-model.ipynb +++ /dev/null @@ -1,297 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用PAI预置算法微调模型\n", - "\n", - "预训练模型(pre-trained model)是通过在大规模数据集上进行训练,从而学习到数据的特征表示的深度学习模型。因为模型是通过大规模的数据进行预训练,因而可以通过少量的数据集进行训练,避免从头训练模型的高额成本。在应用时,预训练模型可以被作为基础模型,然后在特定任务的有标注数据集上进行微调,从而适应特定任务的要求。\n", - "\n", - "PAI在公共模型仓库中,提供了不同领域,包括计算机视觉、自然语言处理、语音等的常见热门预训练模型,例如 `QWen`、`Bert`、`ChatGLM`、`LLama2`、`StableDiffusion 2.1` 等,并提供了相应的预置算法,用户仅需提供数据集,即可在PAI上完成模型微调训练。\n", - "\n", - "在本示例中,我们将以`Bert`模型为示例,展示如何使用PAI提供的预置算法对模型进行微调训练。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "hide-output" - ] - }, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", 
- "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 查看PAI提供的预训练模型\n", - "\n", - "我们可以通过参数`provider`为`pai`,获取`PAI`公共模型仓库下的模型,其中包含了PAI提供的模型和从开源社区精选的模型。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import RegisteredModel\n", - "\n", - "\n", - "data = [[\"ModelName\", \"Task\", \"Revision\"]]\n", - "\n", - "# 获取公共模型仓库'pai'提供的模型列表\n", - "for m in RegisteredModel.list(model_provider=\"pai\"):\n", - " revision = m.version_labels.get(\"revision\")\n", - " license = m.version_labels.get(\"license\")\n", - " task = m.task\n", - " data.append([m.model_name, task, revision])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from IPython.display import HTML, display\n", - "\n", - "display(\n", - " HTML(\n", - " \"{}
\".format(\n", - " \"\".join(\n", - " \"{}\".format(\"\".join(str(_) for _ in row))\n", - " for row in data\n", - " )\n", - " )\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 使用模型的预置算法微调训练\n", - "\n", - "通过`model_name`和`model_provider`参数,我们可以获取PAI提供的预训练模型(`RegisteredModel`对象),`RegisteredModel`对象包含了模型所在的OSS Bucket信息,以及模型的预训练算法配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import RegisteredModel\n", - "\n", - "# 获取PAI模型仓库中的bert-base-uncased模型\n", - "m = RegisteredModel(\n", - " model_name=\"bert-base-uncased\",\n", - " model_provider=\"pai\",\n", - ")\n", - "\n", - "print(m)\n", - "\n", - "# 查看模型的公共读OSS Bucket路径\n", - "print(m.model_data)\n", - "# 查看模型的训练算法配置\n", - "print(m.training_spec)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "获取 `bert-base-uncased` 模型的预置微调算法,以及算法的超参定义和输入数据定义。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "from pai.estimator import AlgorithmEstimator\n", - "\n", - "\n", - "# 通过注册模型的配置,获取相应的预训练算法\n", - "est: AlgorithmEstimator = m.get_estimator()\n", - "\n", - "# 查看算法的超参定义\n", - "print(json.dumps(est.hyperparameter_definitions, indent=4))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 查看算法的输入输出通道定义\n", - "print(json.dumps(est.input_channel_definitions, indent=4))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 默认的超参信息\n", - "print(\"before\")\n", - "print(est.hyperparameters)\n", - "\n", - "\n", - "# 配置超参\n", - "est.set_hyperparameters(batch_size=32)\n", - "\n", - "print(\"after\")\n", - "print(est.hyperparameters)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - 
"模型默认自带了测试的训练数据,例如BERT模型提供的预置算法可以用于训练一个文本分类模型,默认提供了[情感分类数据集sst2](https://huggingface.co/datasets/sst2),可以直接用于模型的微调训练。\n", - "训练数据格式为一个`jsonline`文件,每一行为一个json对象,包含了`label`和`text`两个字段,分别表示文本的标签和文本内容。\n", - "\n", - "```json\n", - "{\"label\": \"negative\", \"text\": \"hide new secretions from the parental units \"}\n", - "{\"label\": \"negative\", \"text\": \"contains no wit , only labored gags \"}\n", - "{\"label\": \"positive\", \"text\": \"that loves its characters and communicates something rather beautiful about human nature \"}\n", - "...\n", - "...\n", - "\n", - "```\n", - "\n", - "用户可以参考以上的数据格式准备数据,当前示例中,我们将基于PAI提供的数据集对模型进行微调训练。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 获取模型自带的训练输入\n", - "default_inputs = m.get_estimator_inputs()\n", - "\n", - "# 默认的算法训练输入,包含了算法使用的预训练模型,训练数据,以及验证数据。\n", - "print(json.dumps(default_inputs, indent=4))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将模型配置的默认的数据集作为训练输入,使用模型预置的PAI算法提交训练作业。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "est.fit(inputs=default_inputs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在训练结束之后获取产出模型的OSS Bucket路径。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 查看输出模型路径\n", - "print(est.model_data())" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/pytorch_ddp/pytorch_ddp.ipynb b/docs/source/tutorial/pytorch_ddp/pytorch_ddp.ipynb 
deleted file mode 100644 index 86b853d..0000000 --- a/docs/source/tutorial/pytorch_ddp/pytorch_ddp.ipynb +++ /dev/null @@ -1,330 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 提交PyTorch分布式作业\n", - "\n", - "\n", - "PAI支持用户提交分布式PyTorch训练作业,本文将介绍如何使用PAI Python SDK,以[PyTorch DDP(DistributedDataParallel)](https://pytorch.org/docs/stable/notes/ddp.html)模式提交分布式PyTorch训练作业。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install pygments" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## PyToch 分布式作业介绍\n", - "\n", - "[PyTorch DDP(Distributed Data Parallel)](https://pytorch.org/docs/stable/notes/ddp.html)是PyTorch提供的分布式数据并行训练功能,支持模型在多台机器上进行并行训练,从而提高训练效率。\n", - "\n", - "PyTorch 
DDP基于多进程的方案实现,支持单机多卡模式和多机多卡模式。在单机多卡模式下,用户可以使用同一台机器下的多个GPU来加速模型的训练。在多机多卡模式下,可以将计算任务分配到多台机器上进行并行计算,加速训练速度。对于DDP的详细介绍,可以参考PyTorch的[官方文档链接](https://pytorch.org/docs/stable/notes/ddp.html)。\n", - "\n", - "\n", - "![PyTorch DDP](./resource/ddp.png)\n", - "\n", - "> PyTorch提供的`DataParallel`和`DistributedDataParallel`模块都支持数据并行训练,[PyTorch官方](https://pytorch.org/tutorials/intermediate/ddp_tutorial.html#comparison-between-dataparallel-and-distributeddataparallel)推荐不论是单机多卡还是多机多卡,都使用`DistributedDataParallel`模块进行训练。\n", - "\n", - "### 代码适配DDP改造\n", - "\n", - "使用PyTorch DDP进行分布式训练需要对原先的PyTorch训练代码(使用单机单卡)进行的修改,具体可以参考[PyTorch官方文档](https://pytorch.org/tutorials/beginner/dist_overview.html#torch-nn-parallel-distributeddataparallel)。\n", - "\n", - "主要包括:\n", - "\n", - "- 初始化分布式训练配置:\n", - "\n", - "需要在训练迭代开始之前完成训练环境初始化。\n", - "\n", - "```python\n", - "\n", - "from torch.distributed import init_process_group, destroy_process_group\n", - "\n", - "def ddp_setup()\n", - " init_process_group(backend=\"nccl\")\n", - "\n", - "```\n", - "\n", - "初始化需要指定机器之间的通讯方式,当使用GPU进行训练时,通常使用`nccl`作为通讯后端,而使用CPU训练时,使用`gloo`,详细的介绍可以参考PyTorch文档: [Which Backend To Use?](https://pytorch.org/docs/stable/distributed.html#which-backend-to-use)\n", - "\n", - "- 使用DDP封装模型:\n", - "\n", - "```python\n", - "\n", - "from torch.nn.parallel import DistributedDataParallel as DDP\n", - "\n", - "# model是原始单机单卡训练的PyTorch模型\n", - "model = DDP(model)\n", - "\n", - "```\n", - "\n", - "\n", - "- 修改DataLoader的采样方式:\n", - "\n", - "当使用DDP进行数据并行训练,不同的worker进程需要读取不同的数据分片进行训练。当不同机器上通过共享存储的方式使用同一份数据集时,可以使用`torch.utils.data.distributed.DistributedSampler`来对数据进行采样,从而保证不同的worker进程读取不同的数据分片。\n", - "\n", - "```python\n", - "\n", - "from torch.utils.data import DataLoader\n", - "from torch.utils.data.distributed import DistributedSampler\n", - "\n", - "train_sampler = DistributedSampler(\n", - "\ttrain_dataset,\n", - "\tshuffle=True)\n", - "\n", - "trainloader = DataLoader(\n", - "\ttrain_dataset,\n", - 
"\tbatch_size=args.per_device_train_batch_size,\n", - "\tsampler=train_sampler,\n", - "\tnum_workers=2,\n", - "\tdrop_last=True)\n", - "\n", - "```\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### PAI支持PyTorch DDP分布式训练\n", - "\n", - "PAI原生支持的PyTorch的分布式训练,当用户提交训练作业,指定作业类型为PyTorch训练作业时(`job_type=\"PyTorchJob\"`),PAI的训练服务会在机器节点上设置环境变量,包含作业机器数量,机器RANK,机器之间的通讯地址等信息。\n", - "\n", - "| 环境变量名 | \t描述 |\n", - "|:----------|:---------|\n", - "|MASTER_ADDR | Master机器节点的服务地址 |\n", - "|MASTER_PORT | Master机器节点端口号,如:23456 |\n", - "|WORLD_SIZE\t | 分布式作业的**机器节点总数**,例如提交的训练作业申请了4台机器,则WORLD_ISZE=4 |\n", - "|RANK\t| **机器节点的RANK**,例如启动了一个4个节点的作业,则各个机器节点的RANK分别为0,1,2,3 |\n", - "\n", - "\n", - "`PyTorch`提供了分布式训练启动工具,`torchrun`(PyTorch 1.1.0及以上版本) 和 `torch.distributed.launch`(PyTorch 1.1.0版本以下),支持训练作业的拉起。配合以上PAI预置的环境变量,我们可以便利得启动分布式训练作业。\n", - "\n", - "\n", - "\n", - "使用`torch.distributed.launch`拉起训练作业示例:\n", - "\n", - "```shell\n", - "\n", - "# for PyTorch<1.1.0\n", - "\n", - "python -m torch.distributed.launch \\\n", - "--nproc_per_node= \\\n", - "--master_addr=$MASTER_ADDR \\\n", - "--master_port=$MASTER_PORT \\\n", - "--nnodes=$WORLD_SIZE \\\n", - "--node_rank=$RANK \\\n", - " training_arguments...\n", - "\n", - "```\n", - "\n", - "使用`torchrun`拉起训练作业示例:\n", - "\n", - "```shell\n", - "\n", - "# for PyTorch>=1.1.0\n", - "torchrun \\\n", - "--nproc_per_node= \\\n", - "--master_addr=$MASTER_ADDR \\\n", - "--master_port=$MASTER_PORT \\\n", - "--nnodes=$WORLD_SIZE \\\n", - "--node_rank=$RANK \\\n", - " training_arguments...\n", - "\n", - "```\n", - "\n", - "用户需要修改` 以上的作业启动命令,同样适用于单机多卡的训练作业启动。\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 提交训练作业\n", - "\n", - "PAI Python SDK提供了Estimator的接口,用于提交训练作业,以下示例中,我们将通过Estimator提交一个PyTorch分布式训练作业。\n", - "\n", - "\n", - "- 准备训练代码\n", - "\n", - 
"PyTorch提供了多机多卡的[训练代码示例](https://github.com/pytorch/examples/blob/main/distributed/ddp-tutorial-series/multinode.py),在修改了模型和checkpoints保存路径后,我们既可以将其用于提交到PAI进行训练。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 通过以下代码查看准备提交的训练代码\n", - "!pygmentize train_src/train_multinode.py" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "- 提交训练作业\n", - "\n", - "我们将使用PAI提供的PyTorch 1.12版本的GPU镜像完成多机多卡的作业训练。使用`estimator.fit`提交训练作业之后,SDK会打印作业的控制台链接,用户可以通过控制台查看作业状态,日志详情等信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "# 使用PAI提供的PyTorch 1.12 GPU镜像\n", - "image_uri = retrieve(\n", - " \"pytorch\",\n", - " framework_version=\"1.12\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "print(\"Training Image URI: \", image_uri)\n", - "\n", - "\n", - "# 每一个机器实例的GPU数量,需要根据用户选择的机器型号(instance_type)进行修改\n", - "gpu_count_per_instance = 2\n", - "\n", - "# 训练脚本使用torchrun命令启动\n", - "command = f\"\"\"torchrun --master_addr=$MASTER_ADDR \\\n", - "--master_port=$MASTER_PORT \\\n", - "--nnodes=$WORLD_SIZE --node_rank=$RANK \\\n", - "--nproc_per_node={gpu_count_per_instance} \\\n", - "train_multinode.py --total_epochs 10 --save_every 5 \\\n", - "\"\"\"\n", - "\n", - "\n", - "# 提交训练作业\n", - "est = Estimator(\n", - " image_uri=image_uri,\n", - " source_dir=\"./train_src\", # 训练代码所在目录\n", - " command=command,\n", - " job_type=\"PyTorchJob\",\n", - " instance_type=\"ecs.gn6i-c24g1.12xlarge\", # 2 * NVIDIA T4 GPU\n", - " instance_count=2, # 机器实例数量\n", - " base_job_name=\"pytorch-ddp\",\n", - ")\n", - "\n", - "# fit方法提交训练作业,默认等待到作业执行完成\n", - "est.fit()\n", - "\n", - "\n", - "# 查看作业的输出模型\n", - "\n", - "est.model_data()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 参考:\n", - "\n", - "- PyTorch Distributed Overview: 
https://pytorch.org/tutorials/beginner/dist_overview.html" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "pai-dev-py38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/pytorch_ddp/resource/ddp.png b/docs/source/tutorial/pytorch_ddp/resource/ddp.png deleted file mode 100644 index ef2638d..0000000 Binary files a/docs/source/tutorial/pytorch_ddp/resource/ddp.png and /dev/null differ diff --git a/docs/source/tutorial/pytorch_mnist/pytorch_mnist.ipynb b/docs/source/tutorial/pytorch_mnist/pytorch_mnist.ipynb deleted file mode 100644 index fa5de20..0000000 --- a/docs/source/tutorial/pytorch_mnist/pytorch_mnist.ipynb +++ /dev/null @@ -1,969 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "source": [ - "# 基于PyTorch训练和部署MNIST图片分类模型\n", - "\n", - "PyTorch是一个非常流行的深度学习框架,提供了极高的灵活性和优越的性能,能够与Python丰富的生态无缝结合,被广泛应用于图像分类、语音识别、自然语言处理、推荐、AIGC等领域。本示例中,我们将使用PAI Python SDK,在PAI完成一个PyTorch模型的训练,然后使用训练获得的模型部署推理服务。主要流程包括:\n", - "\n", - "- Step1: 安装和配置SDK\n", - "\n", - "安装PAI Python SDK,并配置使用的AccessKey、工作空间以及OSS Bucket。\n", - "\n", - "- Step2: 准备训练数据\n", - "\n", - "我们下载一个MNIST数据集,上传到OSS上供训练作业使用。\n", - "\n", - "- Step3: 准备训练脚本\n", - "\n", - "我们使用PyTorch示例仓库中的MNIST训练脚本作为模板,在简单修改之后作为训练脚本。\n", - "\n", - "- Step4: 提交训练作业\n", - "\n", - "使用PAI Python SDK提供的Estimator API,创建一个训练作业,提交到云上执行。\n", - "\n", - "- Step5: 部署推理服务\n", - "\n", - "将以上训练作业输出的模型,分别使用Processor和镜像部署的方式部署到PAI-EAS,创建在线推理服务。\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step1: 安装和配置SDK\n", - "\n", 
- "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai\n", - "!python -m pip install pandas" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step2: 准备训练数据\n", - "\n", - "当前示例中,我们将使用MNIST数据集训练一个图片分类模型。为了支持训练作业加载使用,我们需要将数据上传到OSS Bucket上。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "使用以下的Shell脚本,我们将MNIST数据集下载到本地目录data。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "%%sh\n", - "\n", - "#!/bin/sh\n", - "set -e\n", - "\n", - "url_prefix=\"https://ossci-datasets.s3.amazonaws.com/mnist/\"\n", - "# 如果以上的地址下载速度较慢,可以使用以下地址\n", - "# url_prefix=\"http://yann.lecun.com/exdb/mnist/\"\n", - "\n", - "mkdir -p data/MNIST/raw/\n", - "\n", - "wget ${url_prefix}train-images-idx3-ubyte.gz -P data/MNIST/raw/\n", - "wget ${url_prefix}train-labels-idx1-ubyte.gz -P data/MNIST/raw\n", - "wget ${url_prefix}t10k-images-idx3-ubyte.gz -P 
data/MNIST/raw\n", - "wget ${url_prefix}t10k-labels-idx1-ubyte.gz -P data/MNIST/raw\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们将使用PAI Python SDK提供的OSS上传API,将相应的数据上传到OSS Bucket上。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "from pai.session import get_default_session\n", - "\n", - "sess = get_default_session()\n", - "data_path = \"./data\"\n", - "\n", - "data_uri = upload(data_path, oss_path=\"mnist/data/\", bucket=sess.oss_bucket)\n", - "\n", - "print(data_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step3: 准备训练脚本\n", - "\n", - "使用PyTorch训练模型,需要我们准备相应的脚本。这里我们以PyTorch官方提供的 [MNIST 示例](https://github.com/pytorch/examples/blob/main/mnist/main.py) 为基础,修改了数据加载和模型保存的逻辑,作为训练脚本。\n", - "\n", - "- 使用环境变量获得输入数据路径\n", - "\n", - "训练数据将被挂载到训练作业环境中使用,训练代码需要读取指定的路径获取训练数据。\n", - "\n", - "\n", - "```diff\n", - "\n", - "- dataset1 = datasets.MNIST(\"../data\", train=True, download=True, transform=transform)\n", - "- dataset2 = datasets.MNIST(\"../data\", train=False, transform=transform)\n", - "\n", - "+\t # 使用挂载到训练容器中的数据,默认为 /ml/input/{ChannelName},可以通过环境变量 `PAI_INPUT_{ChannelNameUpperCase}`\n", - "+ data_path = os.environ.get(\"PAI_INPUT_TRAIN_DATA\")\n", - "+ dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)\n", - "+ dataset2 = datasets.MNIST(data_path, train=False, transform=transform)\n", - "\n", - "\n", - "```\n", - "\n", - "- 使用环境变量获取模型的保存路径:\n", - "\n", - "用户需要保存模型到工作容器中的指定路径,PAI的训练服务将其才能够持久化保存模型到OSS Bucket上。默认要求用户需要将模型保存到环境变量 `PAI_OUTPUT_MODEL` 指定的路径下(默认为`/ml/output/model`)。\n", - "\n", - "\n", - "```diff\n", - "\n", - "- if args.save_model:\n", - "- torch.save(model.state_dict(), \"mnist_cnn.pt\")\n", - "\n", - "+ # 保存模型\n", - "+ save_model(model)\n", - "+\n", - 
"+\n", - "+ def save_model(model):\n", - "+ \"\"\"将模型转为TorchScript,保存到指定路径.\"\"\"\n", - "\n", - "+ output_model_path = os.environ.get(\"PAI_OUTPUT_MODEL\")\n", - "+ os.makedirs(output_model_path, exist_ok=True)\n", - "+\n", - "+ m = torch.jit.script(model)\n", - "+ m.save(os.path.join(output_model_path, \"mnist_cnn.pt\"))\n", - "\n", - "```\n", - "\n", - "PAI提供的预置[PyTorch Processor](https://help.aliyun.com/document_detail/470458.html) 在创建服务时,要求输入的模型是[TorchScript 格式](https://pytorch.org/docs/stable/jit.html) 。在本示例中,我们将模型导出为 `TorchScript格式` ,然后分别使用 `PyTorch Processor` 和镜像方式创建推理服务。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "运行以下代码,创建一个训练脚本目录。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [ - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "!mkdir -p train_src" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "将训练作业脚本保存到`train_src`训练脚本目录,完整的作业脚本如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [ - "hide-cell" - ] - }, - "outputs": [], - "source": [ - "%%writefile train_src/train.py\n", - "\n", - "# source: https://github.com/pytorch/examples/blob/main/mnist/main.py\n", - "from __future__ import print_function\n", - "\n", - "import argparse\n", - "import os\n", - "\n", - "import torch\n", - "import torch.nn as nn\n", - "import torch.nn.functional as F\n", - "import torch.optim as optim\n", - "from torch.optim.lr_scheduler import StepLR\n", - "from torchvision import datasets, transforms\n", - "\n", - "\n", - "class Net(nn.Module):\n", - " def __init__(self):\n", - " super(Net, self).__init__()\n", - " self.conv1 = nn.Conv2d(1, 32, 3, 1)\n", - " self.conv2 = nn.Conv2d(32, 64, 3, 1)\n", - " self.dropout1 = nn.Dropout(0.25)\n", - " self.dropout2 = nn.Dropout(0.5)\n", - " self.fc1 = nn.Linear(9216, 128)\n", 
- " self.fc2 = nn.Linear(128, 10)\n", - "\n", - " def forward(self, x):\n", - " x = self.conv1(x)\n", - " x = F.relu(x)\n", - " x = self.conv2(x)\n", - " x = F.relu(x)\n", - " x = F.max_pool2d(x, 2)\n", - " x = self.dropout1(x)\n", - " x = torch.flatten(x, 1)\n", - " x = self.fc1(x)\n", - " x = F.relu(x)\n", - " x = self.dropout2(x)\n", - " x = self.fc2(x)\n", - " output = F.log_softmax(x, dim=1)\n", - " return output\n", - "\n", - "\n", - "def train(args, model, device, train_loader, optimizer, epoch):\n", - " model.train()\n", - " for batch_idx, (data, target) in enumerate(train_loader):\n", - " data, target = data.to(device), target.to(device)\n", - " optimizer.zero_grad()\n", - " output = model(data)\n", - " loss = F.nll_loss(output, target)\n", - " loss.backward()\n", - " optimizer.step()\n", - " if batch_idx % args.log_interval == 0:\n", - " print(\n", - " \"Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\".format(\n", - " epoch,\n", - " batch_idx * len(data),\n", - " len(train_loader.dataset),\n", - " 100.0 * batch_idx / len(train_loader),\n", - " loss.item(),\n", - " )\n", - " )\n", - " if args.dry_run:\n", - " break\n", - "\n", - "\n", - "def test(model, device, test_loader):\n", - " model.eval()\n", - " test_loss = 0\n", - " correct = 0\n", - " with torch.no_grad():\n", - " for data, target in test_loader:\n", - " data, target = data.to(device), target.to(device)\n", - " output = model(data)\n", - " test_loss += F.nll_loss(\n", - " output, target, reduction=\"sum\"\n", - " ).item() # sum up batch loss\n", - " pred = output.argmax(\n", - " dim=1, keepdim=True\n", - " ) # get the index of the max log-probability\n", - " correct += pred.eq(target.view_as(pred)).sum().item()\n", - "\n", - " test_loss /= len(test_loader.dataset)\n", - "\n", - " print(\n", - " \"\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n\".format(\n", - " test_loss,\n", - " correct,\n", - " len(test_loader.dataset),\n", - " 100.0 * correct / len(test_loader.dataset),\n", - 
" )\n", - " )\n", - "\n", - "\n", - "def main():\n", - " # Training settings\n", - " parser = argparse.ArgumentParser(description=\"PyTorch MNIST Example\")\n", - " parser.add_argument(\n", - " \"--batch-size\",\n", - " type=int,\n", - " default=64,\n", - " metavar=\"N\",\n", - " help=\"input batch size for training (default: 64)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--test-batch-size\",\n", - " type=int,\n", - " default=1000,\n", - " metavar=\"N\",\n", - " help=\"input batch size for testing (default: 1000)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--epochs\",\n", - " type=int,\n", - " default=14,\n", - " metavar=\"N\",\n", - " help=\"number of epochs to train (default: 14)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--lr\",\n", - " type=float,\n", - " default=1.0,\n", - " metavar=\"LR\",\n", - " help=\"learning rate (default: 1.0)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--gamma\",\n", - " type=float,\n", - " default=0.7,\n", - " metavar=\"M\",\n", - " help=\"Learning rate step gamma (default: 0.7)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--no-cuda\", action=\"store_true\", default=False, help=\"disables CUDA training\"\n", - " )\n", - " parser.add_argument(\n", - " \"--dry-run\",\n", - " action=\"store_true\",\n", - " default=False,\n", - " help=\"quickly check a single pass\",\n", - " )\n", - " parser.add_argument(\n", - " \"--seed\", type=int, default=1, metavar=\"S\", help=\"random seed (default: 1)\"\n", - " )\n", - " parser.add_argument(\n", - " \"--log-interval\",\n", - " type=int,\n", - " default=10,\n", - " metavar=\"N\",\n", - " help=\"how many batches to wait before logging training status\",\n", - " )\n", - " parser.add_argument(\n", - " \"--save-model\",\n", - " action=\"store_true\",\n", - " default=False,\n", - " help=\"For Saving the current Model\",\n", - " )\n", - " args = parser.parse_args()\n", - " use_cuda = not args.no_cuda and torch.cuda.is_available()\n", - "\n", - " 
torch.manual_seed(args.seed)\n", - "\n", - " device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n", - "\n", - " train_kwargs = {\"batch_size\": args.batch_size}\n", - " test_kwargs = {\"batch_size\": args.test_batch_size}\n", - " if use_cuda:\n", - " cuda_kwargs = {\"num_workers\": 1, \"pin_memory\": True, \"shuffle\": True}\n", - " train_kwargs.update(cuda_kwargs)\n", - " test_kwargs.update(cuda_kwargs)\n", - "\n", - " transform = transforms.Compose(\n", - " [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n", - " )\n", - "\n", - " data_path = os.environ.get(\"PAI_INPUT_TRAIN_DATA\", \"../data\")\n", - " dataset1 = datasets.MNIST(data_path, train=True, download=True, transform=transform)\n", - " dataset2 = datasets.MNIST(data_path, train=False, transform=transform)\n", - " train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)\n", - " test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)\n", - "\n", - " model = Net().to(device)\n", - " optimizer = optim.Adadelta(model.parameters(), lr=args.lr)\n", - "\n", - " scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)\n", - " for epoch in range(1, args.epochs + 1):\n", - " train(args, model, device, train_loader, optimizer, epoch)\n", - " test(model, device, test_loader)\n", - " scheduler.step()\n", - "\n", - " # 保存模型\n", - " save_model(model)\n", - "\n", - "\n", - "def save_model(model):\n", - " \"\"\"将模型转为TorchScript,保存到指定路径.\"\"\"\n", - " output_model_path = os.environ.get(\"PAI_OUTPUT_MODEL\", \"./model/\")\n", - " os.makedirs(output_model_path, exist_ok=True)\n", - "\n", - " m = torch.jit.script(model)\n", - " m.save(os.path.join(output_model_path, \"mnist_cnn.pt\"))\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " main()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step4: 提交训练作业\n", - "\n", - 
"`Estimator`支持用户使用本地的训练脚本,以指定的镜像在云上执行训练作业。通过`Estimator`,我们将以上准备的训练作业脚本提交到PAI,使用PAI提供的PyTorch镜像执行训练任务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 使用PAI提供的PyTorch的GPU训练镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " framework_version=\"1.8PAI\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "print(image_uri)\n", - "\n", - "\n", - "# 配置训练作业\n", - "est = Estimator(\n", - " # 训练作业启动命令\n", - " command=\"python train.py --epochs 5 --batch-size 256 --lr 0.5\",\n", - " # 需要上传的代码文件\n", - " source_dir=\"./train_src/\",\n", - " # 训练作业镜像\n", - " image_uri=image_uri,\n", - " # 机器配置\n", - " # PAI的训练服务支持机器实例类型请见文档:[公共资源组实例和定价](https://help.aliyun.com/document_detail/171758.html?#section-55y-4tq-84y)\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 4vCPU 15GB 1*NVIDIA T4\n", - " # 训练作业的Metric捕获配置\n", - " # 训练服务支持从训练作业输出日志中(训练脚本打印的标准输出和标准错误输出),以正则表达式匹配的方式捕获训练作业Metrics信息。\n", - " metric_definitions=[\n", - " {\n", - " \"Name\": \"loss\",\n", - " \"Regex\": r\".*loss=([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " ],\n", - " base_job_name=\"pytorch_mnist\",\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "`estimator.fit`方法将用户的训练作业提交到PAI上执行。任务提交之后,SDK会打印作业详情页链接和训练作业的日志,等待作业执行结束。\n", - "\n", - "当用户需要直接使用OSS上数据,可以通过`estimator.fit`方法的`inputs`参数传递。通过`inputs`传递数据存储路径会被挂载到目录下,用户的训练脚本可以通过读取本地文件的方式加载数据。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 使用.fit方法提交训练作业\n", - "est.fit(\n", - " inputs={\n", - " # 训练作业的输入数据,每一个Key,Value对是一个Channel,用户可以通过环境变量PAI_INPUT_{ChannelNameUpperCase}获取对应的数据路径\n", - " # 例如以下的train_data,训练的脚本中可以通过`PAI_INPUT_TRAIN_DATA`获取数据挂载后的路径.\n", - " \"train_data\": data_uri,\n", - 
" }\n", - ")\n", - "\n", - "# 训练作业产出的模型路径\n", - "print(\"TrainingJob output model data:\")\n", - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step5: 部署推理服务\n", - "\n", - "在训练作业结束之后,我们可以使用`estimator.model_data()`方法拿到训练作业产出模型的OSS路径。下面的流程中,我们将训练产出的模型部署到PAI创建在线推理服务。\n", - "\n", - "部署推理服务的主要流程包括:\n", - "\n", - "- 通过`InferenceSpec`描述如何使用模型构建推理服务\n", - "\n", - "用户可以选择使用Processor或是自定义镜像的模式进行模型部署。以下示例中将分别使用两种方式部署获得的PyTorch模型。\n", - "\n", - "- 通过`Model.deploy`方法,配置服务的使用资源,服务名称,等信息,创建推理服务。\n", - "\n", - "对于部署推理服务的详细介绍,可以见: [文档:部署推理服务](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/user-guide/model.html)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Processor 模式部署\n", - "\n", - "[Processor](https://help.aliyun.com/document_detail/111029.html) 是PAI对于推理服务程序包的抽象描述,他负责加载模型并启动模型推理服务。模型推理服务会暴露API支持用户进行调用。\n", - "\n", - "PAI提供了预置[PyTorch Processor](https://help.aliyun.com/document_detail/470458.html),支持用户方便地将TorchScript格式的模型部署到PAI,创建推理服务。\n", - "\n", - "以下示例代码中,我们通过PyTorch Processor将训练产出的模型部署为一个推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.model import Model, InferenceSpec\n", - "from pai.predictor import Predictor\n", - "from pai.common.utils import random_str\n", - "\n", - "\n", - "m = Model(\n", - " model_data=est.model_data(),\n", - " # 使用PAI提供的PyTorch Processor\n", - " inference_spec=InferenceSpec(processor=\"pytorch_cpu_1.10\"),\n", - ")\n", - "\n", - "p: Predictor = m.deploy(\n", - " service_name=\"tutorial_pt_mnist_proc_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - ")\n", - "\n", - "print(p.service_name)\n", - "print(p.service_status)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": 
{ - "name": "#%% md\n" - } - }, - "source": [ - "`Model.deploy`返回的`Predictor`对象指向创建的推理服务,可以通过`Predictor.predict`方法发送预测请求给到服务,拿到预测结果。\n", - "\n", - "我们使用`numpy`构建了一个测试样本数据,发送给推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "# # 以上保存TorchScript模型要求输入为 Float32, 数据格式格式的形状为 (BatchSize, Channel, Height, Width)\n", - "dummy_input = np.random.rand(2, 1, 28, 28).astype(np.float32)\n", - "\n", - "# np.random.rand(1, 1, 28, 28).dtype\n", - "res = p.predict(dummy_input)\n", - "print(res)\n", - "\n", - "print(np.argmax(res, 1))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "在测试完成之后,可以通过`Predictor.delete_service`删除推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "p.delete_service()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 镜像部署\n", - "\n", - "Processor模式启动的推理服务性能优越,适合于对于性能较为敏感的场景。对于一些需要灵活自定义的场景,例如模型使用了一些第三方的依赖,或是推理服务需要有前处理和后处理,用户可以通过镜像部署的方式实现。\n", - "\n", - "SDK提供了`pai.model.container_serving_spec()`方法,支持用户使用本地的推理服务代码配合PAI提供的基础镜像的方式创建推理服务。\n", - "\n", - "在使用镜像部署之前,我们需要准备模型服务的代码,负责加载模型、拉起HTTP Server、处理用户的推理请求。我们将使用Flask编写一个模型服务的代码,示例如下:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 准备推理代码保存目录\n", - "!mkdir -p infer_src" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile infer_src/run.py\n", - "\n", - "\n", - "import json\n", - "from flask import Flask, request\n", - "from PIL import Image\n", - "import os\n", - "import torch\n", - "import torchvision.transforms as transforms\n", - "import numpy as np\n", - "import io\n", - "\n", - 
"app = Flask(__name__)\n", - "# 用户指定模型,默认会被加载到当前路径下。 \n", - "MODEL_PATH = \"/eas/workspace/model/\"\n", - "\n", - "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - "model = torch.jit.load(os.path.join(MODEL_PATH, \"mnist_cnn.pt\"), map_location=device).to(device)\n", - "transform = transforms.Compose(\n", - " [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n", - ")\n", - "\n", - "\n", - "@app.route(\"/\", methods=[\"POST\"])\n", - "def predict():\n", - " # 预处理图片数据\n", - " im = Image.open(io.BytesIO(request.data))\n", - " input_tensor = transform(im).to(device)\n", - " input_tensor.unsqueeze_(0)\n", - " # 使用模型进行推理\n", - " output_tensor = model(input_tensor)\n", - " pred_res =output_tensor.detach().cpu().numpy()[0] \n", - "\n", - " return json.dumps(pred_res.tolist())\n", - "\n", - "\n", - "if __name__ == '__main__':\n", - " app.run(host=\"0.0.0.0\", port=int(os.environ.get(\"LISTENING_PORT\", 8000)))\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "通过`pai.model.container_serving_spec`,我们基于本地脚本和PAI提供的`PyTorch`镜像创建了一个`InferenceSpec`对象。\n", - "\n", - "- 模型服务的代码和启动命令:\n", - " \n", - "用户指定的本地脚本目录source_dir会被上传到OSS,然后挂载到服务容器(默认到 /ml/usercode目录)。\n", - "\n", - "- 推理服务镜像:\n", - "\n", - "PAI 提供了基础的推理镜像支持用户使用,用户可以通过`pai.image.retrieve`方法,指定参数`image_scope=ImageScope.INFERENCE`获取PAI提供的推理镜像。\n", - "\n", - "- 模型服务的第三方依赖包:\n", - "\n", - "模型服务代码或是模型的依赖,可以通过`requirements`参数指定,相应的依赖会在服务程序启动前被安装到环境中。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import InferenceSpec, container_serving_spec\n", - "from pai.image import retrieve, ImageScope\n", - "\n", - "torch_image_uri = retrieve(\n", - " framework_name=\"pytorch\", framework_version=\"1.12\", accelerator_type=\"CPU\"\n", - ").image_uri\n", - "\n", - "inf_spec = container_serving_spec(\n", - " command=\"python run.py\",\n", - " 
source_dir=\"./infer_src/\",\n", - " image_uri=torch_image_uri,\n", - " requirements=[\"flask==2.0.0\"],\n", - ")\n", - "print(inf_spec.to_dict())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "使用训练作业输出的模型,以及以上的 InferenceSpec,我们将通过 Model.deploy API部署一个在线推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model\n", - "from pai.common.utils import random_str\n", - "import numpy as np\n", - "\n", - "\n", - "m = Model(\n", - " model_data=est.model_data(),\n", - " inference_spec=inf_spec,\n", - ")\n", - "\n", - "predictor = m.deploy(\n", - " service_name=\"torch_mnist_script_container_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "我们准备一张 MNIST 测试图片,用于发送给到推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAAAAABXZoBIAAABvklEQVR4nF2SO2tUURSFv73PPufMMzGG+EALhZhoaxBs7EWwCoiNhfgbLERrQSwshfwA7SyTIIJgJVgpBBEhEZuQQsIgE/XeubMtZu4wk9UuztrrcWAMEwACGQQSs9CUFARyApuhogKICSFgzZFaTZaoigwHolSo2GD6pYTRUTOoZbUmvXJodhoDMUJDwqyflAGYA6vNyrSntfXbq38e95a3PwQtAVSwjELm/v6hb30ZlL6Rwlg2gSLQvb7vn9bS3JuhP6EzFmsIEoF77ptB853Sd05oitRJrcXp5/5zY9Gwb95fR8dlKiCZh799K2Hdm4e7T1s6ZTIiZ/aq11hc2nF/Hw0h1wEj4awX50892vS/v/q3YIl5JrK0WgfeL32wu+c/WJyZKsLcaq/4+uziuXfFi0QkyuhsAIWwQIb5G+4PkPZk6gQSgNCE7mXvrUAgjQcx0Fh7Pzn0DgJGBLAKvHINPgj56GpRNigKnBLAXHDHpaI6YjkXw3/lZCrFAajoBvhcdFMJUk11FEzHE3/3a22yYFPsKK0m7vrHS3Qjx2EE48rLg7dEYvsYqSKCXnjVX2lrg9kPJpOiAVnIAP8B0Kx+GvoyGWQAAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "\n", - "!pip install -q pillow\n", - "\n", - "\n", - "import 
base64\n", - "from PIL import Image\n", - "from IPython import display\n", - "import io\n", - "\n", - "\n", - "# raw_data是一张MNIST图片,对应数字9\n", - "raw_data = base64.b64decode(b\"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/wAALCAAcABwBAREA/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/9oACAEBAAA/APn+rVhpmoarP5GnWNzeTYz5dvE0jfkoJovNMv8ATmK3tjc2zByhE8TIQw6jkdR6VVq9oumPrWuWGlxyLG95cRwK7dFLMFyfzr3aXwp4ltAfB3gWwudI01JNuoa7eZhku5AMHafvFOw2Dn6ZJ4z4yeLk1HUbXwrZSSy2Oh5heeaQu88wG1mLHk4wR9c+1eXUqsVYMpIIOQR2r1D4QazqOs/FnSG1fVLi9ZI5vL+2TNKc+U2ApYnB7/hXml5LLNfXEsxLSvIzOSMEsTk1DRVnT7+60vULe/spmhureQSRSL1Vh0NWNd1mXX9ZuNUuLe2gmuCGkS2QohbABbBJwTjJ9yelZ1f/2Q==\")\n", - "\n", - "im = Image.open(io.BytesIO(raw_data))\n", - "\n", - "display.display(im)\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "推理服务使用 HTTP 请求体内的数据作为输入的图片,SDK 的 `raw_predict` 方法接受 bytes 数据类型的请求,通过 POST 方法,在请求内带上用户推理数据,发送给到推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "from pai.predictor import RawResponse\n", - "\n", - "resp: RawResponse = predictor.raw_predict(data=raw_data)\n", - "print(resp.json())\n", - "\n", - "print(np.argmax(resp.json()))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "测试完成之后可以删除服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "predictor.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - 
"language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/docs/source/tutorial/stable_diffusion_lora/resource/dreambooth.jpeg b/docs/source/tutorial/stable_diffusion_lora/resource/dreambooth.jpeg deleted file mode 100644 index 6f60f55..0000000 Binary files a/docs/source/tutorial/stable_diffusion_lora/resource/dreambooth.jpeg and /dev/null differ diff --git a/docs/source/tutorial/stable_diffusion_lora/stable_diffusion_lora.ipynb b/docs/source/tutorial/stable_diffusion_lora/stable_diffusion_lora.ipynb deleted file mode 100644 index 679a203..0000000 --- a/docs/source/tutorial/stable_diffusion_lora/stable_diffusion_lora.ipynb +++ /dev/null @@ -1,750 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# StableDiffusion模型LoRA微调\n", - "\n", - "[StableDiffusion](https://huggingface.co/runwayml/stable-diffusion-v1-5)是由StabilityAI、CompVis与Runway合作开发并开源的文本生成图像的模型。他可以直接用于文本生成图像的任务,也可以作为基础模型进行微调,从而从数据集上学习到新的风格,或是用于完成新的任务。本文将介绍通过在PAI完成LoRA微调StableDiffusion模型。\n", - "\n", - "## 背景介绍\n", - "\n", - "[LoRA(Low-Rank Adaption of Large Language Model)](https://arxiv.org/abs/2106.09685)是由微软提出的高效微调大语言模型的方法,他通过冻结原始模型参数,在模型上新增低秩矩阵作为可训练参数的方式微调模型。研究者发现,通过在Transformer块的Attention层上添加LoRA低秩矩阵对模型进行微调,能够获得与全参数微调水平相近的模型。相比于全参数的微调,LoRA有以下优点:\n", - "\n", - "- 训练的参数量小,计算资源消耗低,训练速度更快。\n", - " \n", - "- 对于计算资源/显存的要求更低,支持用户在消费级/中低端的GPU卡对大模型进行微调。\n", - "\n", - "- 冻结了原始模型参数,在训练过程中不容易发生灾难性遗忘。\n", - "\n", - "- 产出的模型较小,存储的成本较低,仅需推理时和原始的模型一同使用进行推理。\n", - "\n", - 
"后续有开发者,将其引入到[StableDiffsion模型的微调](https://github.com/cloneofsimo/lora)中,取得了不错的效果。HuggingFace提供的[Diffusers库](https://github.com/huggingface/diffusers)支持用户使用扩散模型进行训练或是推理,他支持用户使用LoRA微调扩散模型,并提供了相应的训练代码,支持[文生图](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py),以及[DreamBooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py)的LoRA训练。\n", - "\n", - "当前示例,我们将基于[Diffusers库提供的训练代码和文档](https://huggingface.co/docs/diffusers/training/overview),在PAI完成StableDiffusion v1.5模型的LoRA微调训练。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## 准备工作\n", - "\n", - "### 安装PAI Python SDK\n", - "\n", - "安装PAI Python SDK,用于提交训练任务到PAI。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 安装PAI Python SDK\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在**命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以执行以下代码,验证配置是否成功。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 配置成功之后,我们可以拿到工作空间的信息\n", - "assert sess.workspace_name is not None\n", - "assert sess.oss_bucket is not None" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 获取PAI提供的StableDiffusion模型\n", - "\n", - "PAI的公共模型仓库提供了StableDiffusion v1.5模型,用户可以通过以下代码获取模型的信息,用于后续的微调训练。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "from pai.session import get_default_session\n", - "from pai.libs.alibabacloud_aiworkspace20210204.models import ListModelsRequest\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取PAI提供的StableDiffusion模型信息\n", - "resp = sess._acs_workspace_client.list_models(\n", - " request=ListModelsRequest(\n", - " provider=\"pai\",\n", - " model_name=\"stable_diffusion_v1.5\",\n", - " )\n", - ")\n", - "model = resp.body.models[0].latest_version\n", - "\n", - "# StableDiffusion 模型的OSS URI(公共读)\n", - "print(f\"StableDiffusion ModelUri: {model.uri}\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## LoRA TextToImage微调训练\n", - "\n", - "通过LoRA训练StableDiffusion模型,可以快速,低成本地获得一个能够生成指定风格的模型。在以下示例中,我们将使用一个Demo的图像文本数据集,对StableDiffusion模型进行LoRA微调。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备训练数据\n", - "\n", - "当前示例准备了一个简单的文本图片数据集在`train-data`目录下,包含训练的图片以及相应的标注文件(`metadata.jsonl`)。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!ls -lh train-data/\n", - "!cat train-data/metadata.jsonl" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们需要将数据上传到OSS Bucket上,供训练作业使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "\n", - "train_data_uri = upload(\"./train-data/\", \"stable_diffusion_demo/text2image/train-data/\")\n", - "print(train_data_uri)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Diffuerser提供的训练脚本默认使用[ImageFolder](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder)格式的数据集,用户可以参考以上的格式准备数据,更加详细的介绍可以见HuggingFace datasets的[ImageFolder数据集文档](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder)。" - ] - }, - 
{ - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### 准备训练作业脚本\n", - "\n", - "我们将使用Diffusers库提供的[训练作业脚本(train_text_to_image_lora.py)](https://github.com/huggingface/diffusers/blob/v0.17.1/examples/text_to_image/train_text_to_image_lora.py)完成LoRA训练。执行以下代码,我们将代码下载到本地,用于后续提交训练任务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p train_lora\n", - "\n", - "# code source: https://github.com/huggingface/diffusers/blob/v0.17.1/examples/text_to_image/train_text_to_image_lora.py\n", - "!wget -P train_lora https://raw.githubusercontent.com/huggingface/diffusers/v0.17.1/examples/text_to_image/train_text_to_image_lora.py" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们提交的训练作业将使用PAI提供的PyTorch 1.12的GPU镜像运行,我们需要准备一个`requirements.txt`文件在训练代码目录下,以安装一些额外的依赖包。\n", - "\n", - "训练脚本目录提交到PAI上执行训练时,目录下的`requirements.txt`文件将被安装到作业环境中。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_lora/requirements.txt\n", - "\n", - "diffusers>=0.17.1\n", - "\n", - "# source: https://github.com/huggingface/diffusers/blob/v0.17.1/examples/text_to_image/requirements.txt\n", - "accelerate>=0.16.0,<=0.18.0\n", - "torchvision\n", - "transformers>=4.25.1,<5.0.0\n", - "datasets\n", - "ftfy\n", - "tensorboard\n", - "Jinja2" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 提交训练作业\n", - "\n", - "Diffuers提供的训练脚本,需要使用`Accelerate`工具启动,并通过命令行参数的方式,传递超参,预训练模型路径/ID,以及训练数据集地址。PAI的训练作业,支持通过环境变量的方式获取输入输出的数据/模型路径,以及训练作业超参。以下脚本中,我们通过环境变量的方式,传递超参、输入输出路径给到训练脚本。\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.image import retrieve\n", - "\n", - "# 使用PAI提供的PyTorch 1.12 GPU镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " 
\"1.12\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "print(image_uri)\n", - "\n", - "\n", - "# 训练作业启动命令,通过环境变量的方式获取:\n", - "# a)输入输出的模型/数据路径\n", - "# b)训练任务的超参数\n", - "command = \"\"\"accelerate launch --mixed_precision=\"fp16\" train_text_to_image_lora.py \\\n", - " --pretrained_model_name_or_path=$PAI_INPUT_PRETRAINED_MODEL \\\n", - " --train_data_dir=$PAI_INPUT_TRAIN_DATA \\\n", - " --output_dir=$PAI_OUTPUT_MODEL \\\n", - " --logging_dir=$PAI_OUTPUT_TENSORBOARD \\\n", - " --dataloader_num_workers=8 \\\n", - " --resolution=512 --center_crop --random_flip \\\n", - " --train_batch_size=$PAI_HPS_TRAIN_BATCH_SIZE \\\n", - " --gradient_accumulation_steps=$PAI_HPS_GRADIENT_ACCUMULATION_STEPS \\\n", - " --max_train_steps=$PAI_HPS_MAX_TRAIN_STEPS \\\n", - " --learning_rate=$PAI_HPS_LEARNING_RATE \\\n", - " --checkpointing_steps=$PAI_HPS_CHECKPOINTING_STEPS \\\n", - " --max_grad_norm=1 \\\n", - " --lr_scheduler=\"cosine\" --lr_warmup_steps=0 \\\n", - " --validation_prompt=\"$PAI_HPS_VALIDATION_PROMPT\" \\\n", - " --validation_epochs=$PAI_HPS_VALIDATION_EPOCHS \\\n", - " --seed=$PAI_HPS_SEED\"\"\"\n", - "\n", - "\n", - "# 训练作业超参\n", - "hps = {\n", - " \"validation_prompt\": \"a photo of cat in a bucket\", # 验证模型的Prompt\n", - " \"validation_epochs\": 1, # 每隔50个epoch验证一次\n", - " \"max_train_steps\": 10, # 最大训练步数\n", - " \"learning_rate\": 1e-4, # 学习率\n", - " \"train_batch_size\": 2, # 训练batch size\n", - " \"gradient_accumulation_steps\": 1, # 梯度累积步数\n", - " \"checkpointing_steps\": 5, # 每隔100个step保存一次模型\n", - " \"seed\": 1337, # 随机种子\n", - "}" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "以下代码中,我们使用`Estimator`类,指定训练作业使用的镜像,训练作业超参,输入数据路径等,将LoRA训练作业提交到PAI执行。\n", - "\n", - "对于使用SDK提交训练作业的详细介绍,用户可以参考文档: [提交训练作业](https://help.aliyun.com/document_detail/2261505.html)。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from 
pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "# 使用PAI提供的PyTorch 1.12 GPU镜像\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " \"1.12\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "print(image_uri)\n", - "\n", - "\n", - "# 训练作业启动命令,通过环境变量的方式获取:\n", - "# a)输入输出的模型/数据路径\n", - "# b)训练任务的超参数\n", - "\n", - "command = \"\"\"accelerate launch --mixed_precision=\"fp16\" train_text_to_image_lora.py \\\n", - " --pretrained_model_name_or_path=$PAI_INPUT_PRETRAINED_MODEL \\\n", - " --train_data_dir=$PAI_INPUT_TRAIN_DATA \\\n", - " --output_dir=$PAI_OUTPUT_MODEL \\\n", - " --logging_dir=$PAI_OUTPUT_TENSORBOARD \\\n", - " --dataloader_num_workers=8 \\\n", - " --resolution=512 --center_crop --random_flip \\\n", - " --train_batch_size=$PAI_HPS_TRAIN_BATCH_SIZE \\\n", - " --gradient_accumulation_steps=$PAI_HPS_GRADIENT_ACCUMULATION_STEPS \\\n", - " --max_train_steps=$PAI_HPS_MAX_TRAIN_STEPS \\\n", - " --learning_rate=$PAI_HPS_LEARNING_RATE \\\n", - " --checkpointing_steps=$PAI_HPS_CHECKPOINTING_STEPS \\\n", - " --max_grad_norm=1 \\\n", - " --lr_scheduler=\"cosine\" --lr_warmup_steps=0 \\\n", - " --validation_prompt=\"$PAI_HPS_VALIDATION_PROMPT\" \\\n", - " --validation_epochs=$PAI_HPS_VALIDATION_EPOCHS \\\n", - " --seed=$PAI_HPS_SEED\"\"\"\n", - "\n", - "\n", - "# 训练作业超参\n", - "hps = {\n", - " \"validation_prompt\": \"a photo of cat in a bucket\", # 验证模型的Prompt\n", - " \"validation_epochs\": 1, # 每隔50个epoch验证一次\n", - " \"max_train_steps\": 10, # 最大训练步数\n", - " \"learning_rate\": 1e-4, # 学习率\n", - " \"train_batch_size\": 2, # 训练batch size\n", - " \"gradient_accumulation_steps\": 1, # 梯度累积步数\n", - " \"checkpointing_steps\": 5, # 每隔100个step保存一次模型\n", - " \"seed\": 1337, # 随机种子\n", - "}\n", - "\n", - "\n", - "est = Estimator(\n", - " image_uri=image_uri, # 训练作业使用的镜像\n", - " source_dir=\"train_lora\", # 训练代码路径,代码会被上传,并准备到训练作业环境中\n", - " command=command, # 训练任务启动命令\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 4 
vCPU, 16 GiB 内存, 1 x NVIDIA T4 GPU\n", - " base_job_name=\"sd_lora_t2i_\", # 训练作业名称前缀\n", - " hyperparameters=hps, # 作业超参,训练命令和脚本可以通过 `PAI_HPS_{HP_NAME_UPPER_CASE}` 环境变量,或是读取`/ml/input/config/hpyerparameters.json`文件获取\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "使用`inputs`参数指定准备到训练作业环境的模型和数据,提交训练作业。 \n", - "\n", - "`inputs`参数是一个字典,Key是输入的名称,Value是输入数据的存储路径(例如OSS URI)。相应的数据会被准备到作业执行环境中(通过挂载的方式),训练作业脚本,能够通过环境变量`PAI_INPUT_{KeyUpperCase}`获取到输入数据的路径,通过读取本地文件的方式读取预训练模型和数据。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"Input PreTrainedModel: \", model.uri)\n", - "print(\"Input TrainData: \", train_data_uri)\n", - "\n", - "\n", - "# 提交训练作业\n", - "est.fit(\n", - " inputs={\n", - " \"pretrained_model\": model.uri,\n", - " \"train_data\": train_data_uri,\n", - " },\n", - " wait=True,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在启动命令中,我们使用`--output_dir=$PAI_OUTPUT_MODEL`,让训练脚本将模型写出到指定的输出目录中。对应的模型数据会被保存到用户的OSS Bucket中,我们可以通过`est.model_data()`获得输出的模型的OSS URI。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "from pai.common.oss_utils import download\n", - "\n", - "print(\"OutputModel Path: \", est.model_data())\n", - "lora_weight_uri = os.path.join(est.model_data(), \"pytorch_lora_weight.bin\")\n", - "lora_model_path = download(oss_path=lora_weight_uri, local_path=\"./lora_model/\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "以上训练获得LoRA模型,可以使用diffuser的推理pipeline加载使用:\n", - "\n", - "```python\n", - "# StableDiffusionPipeline加载LoRA模型\n", - "\n", - "\n", - "import torch\n", - "from diffusers import StableDiffusionPipeline\n", - "\n", - "# 加载基础模型\n", - "model_id_or_path = \"\"\n", - "pipe = 
StableDiffusionPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)\n", - "\n", - "# 加载LoRA模型\n", - "pipe.unet.load_attn_procs(lora_model_path)\n", - "\n", - "# 使用推理pipeline\n", - "image = pipe(\n", - " \"A pokemon with blue eyes.\", num_inference_steps=25, guidance_scale=7.5,\n", - " cross_attention_kwargs={\"scale\": 0.5},\n", - ").images[0]\n", - "\n", - "\n", - "```\n", - "\n", - "或则用户也可以将其转为safetensor格式,在StableDiffusiuson WebUI中使用。\n", - "\n", - "\n", - "```python\n", - "import torch\n", - "from safetensors.torch import save_file\n", - "\n", - "# 加载模型\n", - "lora_model = torch.load(lora_model_bin_path, map_location=\"cpu\")\n", - "\n", - "# 转换为safetensor格式\n", - "save_file(lora_model, \"lora.safetensors\")\n", - "\n", - "```" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## LoRA && DreamBooth微调训练\n", - "\n", - "### DreamBooth简介\n", - "\n", - "DreamBooth是Google的研究人员于2022年提出的技术,支持在少量的图片上进行训练,然后将自定义的主题注入到扩散模型中。\n", - "\n", - "![](./resource/dreambooth.jpeg)\n", - "\n", - "图片来源: https://dreambooth.github.io/\n", - "\n", - "直接使用少量的图片文本数据集对扩散模型进行训练容易导致过拟合,或是语言漂移。DreamBooth使用以下方式避免了模型的退化:\n", - "\n", - "- 用户需要为新的主题选择一个罕见的词(标识符),模型将在训练过程中将这个词和图片的主题进行关联。\n", - "\n", - "- 为了避免过拟合和语言漂移,微调过程中,使用相同类别的图片参与训练(这些图片可以由扩散模型生成)。\n", - "\n", - "对于DreamBooth的详细介绍,用户可以参考[DreamBooth的博客](https://dreambooth.github.io/),以及[HuggingFace博客](https://huggingface.co/blog/dreambooth)对于DreamBooth的介绍。\n", - "\n", - "当通过DreamBooth训练扩散模型时,用户可以选择进行普通的微调(直接微调原始的模型参数),也可以使用LoRA的方式进行微调,在以下示例中,我们将使用LoRA的方式,进行DreamBooth训练。\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备训练数据集\n", - "\n", - "为了训练DreamBooth,用户需要准备特定风格的图片数据集,当前示例中,我们准备了数据集在`sks-dog`目录下。\n", - "\n", - "通过以下代码,我们将将数据集上传到OSS上,供训练作业使用。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "\n", - "train_data_uri = 
upload(\"sks-dog\", \"stable_diffusion/dreambooth/train-sks-dog/\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 准备训练代码\n", - "\n", - "我们使用HuggingFace Diffusers库提供的训练脚本,通过LoRA && DreamBooth方式训练扩散模型。通过以下代码,我们下载训练脚本到本地。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 创建训练脚本保存路径\n", - "!mkdir -p train_dreambooth/\n", - "\n", - "# 下载HuggingFace diffusers(v1.17.1)库提供的示例代码(因为访问GitHub的网络并不稳定,用户当出现下载失败,可以多尝试几次)\n", - "!wget https://raw.githubusercontent.com/huggingface/diffusers/v0.17.1/examples/dreambooth/train_dreambooth_lora.py -P train_dreambooth/" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练作业将使用PAI提供的PyTorch镜像运行脚本,我们需要通过以下的`requirements.txt`安装训练脚本依赖的库。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile train_dreambooth/requirements.txt\n", - "# %%writefile 指令会将当前内容写入到 train_dreambooth/requirements.txt 文件中\n", - "\n", - "diffusers>=0.17.1\n", - "\n", - "# source: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt\n", - "accelerate>=0.16.0,<=0.18.0 # diffusers 提供的示例代码(v0.17.1)无法运行在accelerate>=0.18.0上.\n", - "torchvision\n", - "transformers>=4.25.1,<5.0.0\n", - "ftfy\n", - "tensorboard\n", - "Jinja2" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 提交训练作业\n", - "\n", - "通过以下代码,我们使用PAI Python SDK,提交训练作业到PAI。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "image_uri = retrieve(\n", - " \"PyTorch\",\n", - " \"latest\",\n", - " accelerator_type=\"GPU\",\n", - ").image_uri\n", - "\n", - "\n", - "# 训练作业启动命令,通过环境变量的方式获取:\n", - "# a)输入输出的模型/数据路径\n", - "# b)训练任务的超参数\n", - "command = 
\"\"\"accelerate launch train_dreambooth_lora.py \\\n", - " --pretrained_model_name_or_path=$PAI_INPUT_PRETRAINED_MODEL \\\n", - " --instance_data_dir=$PAI_INPUT_TRAIN_DATA \\\n", - " --output_dir=$PAI_OUTPUT_MODEL \\\n", - " --logging_dir=$PAI_OUTPUT_TENSORBOARD \\\n", - " --instance_prompt=\"$PAI_HPS_INSTANCE_PROMPT\" \\\n", - " --resolution=512 \\\n", - " --train_batch_size=$PAI_HPS_TRAIN_BATCH_SIZE \\\n", - " --gradient_accumulation_steps=$PAI_HPS_GRADIENT_ACCUMULATION_STEPS \\\n", - " --checkpointing_steps=$PAI_HPS_CHECKPOINTING_STEPS \\\n", - " --learning_rate=$PAI_HPS_LEARNING_RATE \\\n", - " --lr_scheduler=\"constant\" \\\n", - " --lr_warmup_steps=0 \\\n", - " --max_train_steps=$PAI_HPS_MAX_TRAIN_STEPS \\\n", - " --validation_prompt=\"$PAI_HPS_VALIDATION_PROMPT\" \\\n", - " --validation_epochs=$PAI_HPS_VALIDATION_EPOCHS \\\n", - " --seed=\"0\"\n", - " \"\"\"\n", - "\n", - "# 训练作业超参\n", - "hps = {\n", - " \"instance_prompt\": \"a photo of sks dog\", # 训练的图片数据文本使用的标注Prompt。这里的sks是我们使用的数据集的特定风格标识符。\n", - " \"validation_prompt\": \"a photo of sks dog in a bucket\", # 验证模型的Prompt\n", - " # \"class_prompt\": \"a photo of dog\", # 用于生成类别图片数据,避免模型过拟合&&语言偏移\n", - " \"validation_epochs\": 50, # 每隔50个epoch验证一次\n", - " \"max_train_steps\": 500, # 最大训练步数\n", - " \"learning_rate\": 1e-4, # 学习率\n", - " \"train_batch_size\": 1, # 训练batch size\n", - " \"gradient_accumulation_steps\": 1, # 梯度累积步数\n", - " \"checkpointing_steps\": 100, # 每隔100个step保存一次模型\n", - "}\n", - "\n", - "\n", - "est = Estimator(\n", - " image_uri=image_uri, # 训练作业使用的镜像\n", - " source_dir=\"train_dreambooth\", # 训练代码路径,代码会被上传,并准备到训练作业环境中\n", - " command=command, # 训练任务启动命令\n", - " instance_type=\"ecs.gn6i-c4g1.xlarge\", # 4 vCPU, 16 GiB 内存, 1 x NVIDIA T4 GPU\n", - " base_job_name=\"sd_lora_dreambooth_\", # 训练作业名称前缀\n", - " hyperparameters=hps, # 作业超参,训练命令和脚本可以通过 `PAI_HPS_{HP_NAME_UPPER_CASE}` 环境变量,或是读取`/ml/input/config/hpyerparameters.json`文件获取\n", - ")" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"Input PreTrainedModel: \", model.uri)\n", - "print(\"Input TrainData: \", train_data_uri)\n", - "\n", - "est.fit(\n", - " inputs={\n", - " \"pretrained_model\": model.uri,\n", - " \"train_data\": train_data_uri,\n", - " },\n", - " wait=True,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "训练任务会在输出目录下,生成一个`pytorch_lora_weights.bin`的模型文件,相应的文件会被上传准备到用户的OSS中,用户可以通过以下的代码,将模型文件下载到本地。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import posixpath\n", - "\n", - "from pai.common.oss_utils import download\n", - "\n", - "# 输出模型路径\n", - "output_lora_model = posixpath.join(est.model_data(), \"pytorch_lora_weights.bin\")\n", - "print(\"OutputModel: \", output_lora_model)\n", - "\n", - "model_path = download(output_lora_model, \"./lora_dreambooth_model/\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "获得的LoRA模型,用户可以通过Diffuser提供的API,在推理pipeline加载使用,用户可以参考diffuser的文档:[DreamBooth Inference](https://huggingface.co/docs/diffusers/training/lora#dreambooth-inference)。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 结语\n", - "\n", - "通过当前示例,我们介绍了如何基于HuggingFace diffusers库,在PAI上完成StableDiffusion模型的LoRA微调训练。用户可以通过Diffuers库的API,直接在推理Pipeline中加载使用这些LoRA模型,也可以将模型转换Safetensors格式,用于StableDiffusionWebUI中。\n", - "\n", - "除了对于LoRA的支持,Diffusers库支持对于直接对扩散模型微调,也支持包括TextInversion, ControlNet, InstructPix2Pix等方式训练扩散模型,并且提供了相应的训练脚本和教程。用户同样可以参考本示例,在PAI运行这些训练任务。\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 参考\n", - "\n", - "- HuggingFace LoRa Tutorial: https://huggingface.co/docs/diffusers/training/lora#texttoimage\n", - "\n", - "- HuggingFace LoRA Blog: https://huggingface.co/blog/lora\n", - "\n", - "- Google 
DreamBooth Blog:https://dreambooth.github.io/" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/stable_diffusion_lora/train-data/cat1.jpg b/docs/source/tutorial/stable_diffusion_lora/train-data/cat1.jpg deleted file mode 100644 index 89d760d..0000000 Binary files a/docs/source/tutorial/stable_diffusion_lora/train-data/cat1.jpg and /dev/null differ diff --git a/docs/source/tutorial/stable_diffusion_lora/train-data/cat2.jpg b/docs/source/tutorial/stable_diffusion_lora/train-data/cat2.jpg deleted file mode 100644 index 13eec49..0000000 Binary files a/docs/source/tutorial/stable_diffusion_lora/train-data/cat2.jpg and /dev/null differ diff --git a/docs/source/tutorial/stable_diffusion_lora/train-data/cat3.jpg b/docs/source/tutorial/stable_diffusion_lora/train-data/cat3.jpg deleted file mode 100644 index 48aba20..0000000 Binary files a/docs/source/tutorial/stable_diffusion_lora/train-data/cat3.jpg and /dev/null differ diff --git a/docs/source/tutorial/stable_diffusion_lora/train-data/metadata.jsonl b/docs/source/tutorial/stable_diffusion_lora/train-data/metadata.jsonl deleted file mode 100644 index 8ad3b35..0000000 --- a/docs/source/tutorial/stable_diffusion_lora/train-data/metadata.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"file_name": "cat1.jpg", "text": "cute cat"} -{"file_name": "cat2.jpg", "text": "a little cat"} -{"file_name": "cat3.jpg", "text": "a little cat"} diff --git a/docs/source/tutorial/tensorboard/tensorboard.ipynb b/docs/source/tutorial/tensorboard/tensorboard.ipynb deleted file mode 100644 index 6dd3855..0000000 --- 
a/docs/source/tutorial/tensorboard/tensorboard.ipynb +++ /dev/null @@ -1,245 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用TensorBoard可视化训练过程\n", - "\n", - "TensorBoard是一个用于追踪、可视化、分析模型训练过程和训练结果的工具,它提供了多种可视化功能,可以与PyTorch、TensorFlow、Keras、Huggingface transformers、ModelScope等机器学习框架一起使用,帮助用户了解模型的训练过程和性能。\n", - "\n", - "PAI提供了TensorBoard服务,支持用户在PAI创建TensorBoard应用,用于查看训练作业输出的TensorBoard日志。\n", - "\n", - "本文档将以不同的机器学习框架为示例,展示如何在PAI使用TensorBoard追踪和可视化模型训练过程。\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## 安装和配置SDK\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK需要配置访问阿里云服务需要的AccessKey,以及当前使用的工作空间和OSS Bucket。在PAI SDK安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥、工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证配置是否已生效。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "# 获取配置的工作空间信息\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 提交训练任务" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们首先需要准备训练脚本,使用将PyTorch的TensorBoard utility记录TensorBoard日志。\n", - "\n", - "\n", - "> PyTorch提供的TensorBoard utilities的使用可以见文档: [torch.utils.tensorboard 文档](https://pytorch.org/docs/stable/tensorboard.html)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - 
"outputs": [], - "source": [ - "!mkdir -p src" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "镜像里需要先安装TensorBoard,可以在训练目录中准备 ``requirements.txt`` 指定需要按照的第三方库。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile src/requirements.txt\n", - "\n", - "tensorboard" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile src/run.py\n", - "\n", - "import os\n", - "\n", - "import torch\n", - "from torch.utils.tensorboard import SummaryWriter\n", - "\n", - "\n", - "# 通过环境变量获取TensorBoard输出路径,默认为 /ml/output/tensorboard/\n", - "tb_log_dir = os.environ.get(\"PAI_OUTPUT_TENSORBOARD\")\n", - "print(f\"TensorBoard log dir: {tb_log_dir}\")\n", - "writer = SummaryWriter(log_dir=tb_log_dir)\n", - "\n", - "def train_model(iter):\n", - "\n", - "\n", - " x = torch.arange(-5, 5, 0.1).view(-1, 1)\n", - " y = -5 * x + 0.1 * torch.randn(x.size())\n", - "\n", - " model = torch.nn.Linear(1, 1)\n", - " criterion = torch.nn.MSELoss()\n", - " optimizer = torch.optim.SGD(model.parameters(), lr = 0.1)\n", - "\n", - " for epoch in range(iter):\n", - " y1 = model(x)\n", - " loss = criterion(y1, y)\n", - " writer.add_scalar(\"Loss/train\", loss, epoch)\n", - " optimizer.zero_grad()\n", - " loss.backward()\n", - " optimizer.step()\n", - "\n", - "if __name__ == \"__main__\":\n", - " train_model(100)\n", - " writer.flush()\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "est = Estimator(\n", - " command=\"python run.py\",\n", - " source_dir=\"./src\",\n", - " image_uri=retrieve(\"PyTorch\", \"latest\").image_uri,\n", - " instance_type=\"ecs.c6.large\",\n", - ")\n", - "\n", - "est.fit(wait=False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, 
- "source": [ - "## 使用TensorBoard应用监控训练" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在PAI启动一个TensorBoard应用,查看使用Estimator的训练作业写出的TensorBoard日志。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tb = est.tensorboard()\n", - "\n", - "print(tb.app_uri)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "使用完成之后,删除TensorBoard应用" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tb.delete()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "base", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/source/tutorial/tensorflow_image_classification/tensorflow_image_classification.ipynb b/docs/source/tutorial/tensorflow_image_classification/tensorflow_image_classification.ipynb deleted file mode 100644 index b2e1f62..0000000 --- a/docs/source/tutorial/tensorflow_image_classification/tensorflow_image_classification.ipynb +++ /dev/null @@ -1,797 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 使用 PAI Python SDK 训练和部署 TensorFlow 模型\n", - "\n", - "[TensorFlow](https://pytorch.org/) 是由Google开发的开源机器学习框架,它可以用于构建和训练各种类型的神经网络和机器学习模型。当前示例中,我们将使用PAI Python SDK,在PAI完成一个TensorFlow图片分类模型的训练和部署。主要流程包括:\n", - "\n", - "\n", - "1. 安装和配置SDK\n", - "\n", - "安装PAI Python SDK,并完成SDK配置.\n", - "\n", - "2. 准备数据集:\n", - "\n", - "这里我们选择使用Fashion-MNIST数据集,将获取的数据集上传到OSS Bucket供训练作业使用。\n", - "\n", - "3. 提交训练作业\n", - "\n", - "按照PAI训练作业的范式,准备TensorFlow训练脚本,然后使用PAI Python SDK提供的Estimator API,将训练脚本提交到云上执行。\n", - "\n", - "4. 
部署模型\n", - "\n", - "将以上训练作业输出的模型,部署到PAI-EAS,创建一个在线推理服务。\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step1: 准备工作\n", - "\n", - "\n", - "我们需要首先安装PAI Python SDK以运行本示例。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "SDK 需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在 PAI SDK 安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", - "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step2: 准备训练数据\n", - "\n", - "[FashionMNIST](https://github.com/zalandoresearch/fashion-mnist)是一个流行的视觉分类数据集,数据集中包含70,000张28x28像素的灰度图像,这些图像代表了10种不同类型的服装,包括衬衣、裤子、套装、鞋子等等。当前示例将使用`FashionMNIST`数据集训练一个服饰图片分类模型。\n", - "\n", - "我们将首先下载数据到本地,然后再上传到OSS bucket中,供训练作业使用。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "\n", - "# 下载训练数据集\n", - "!mkdir -p fashion-mnist/train/\n", - "!wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz -O fashion-mnist/train/images.gz\n", - "!wget 
http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz -O fashion-mnist/train/labels.gz\n", - "\n", - "\n", - "# 下载测试数据集\n", - "!mkdir -p fashion-mnist/test/\n", - "!wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz -O fashion-mnist/test/images.gz\n", - "!wget http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz -O fashion-mnist/test/labels.gz\n", - "\n", - "!ls -lh fashion-mnist" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.common.oss_utils import upload\n", - "from pai.session import get_default_session\n", - "\n", - "sess = get_default_session()\n", - "\n", - "train_data = upload(\n", - " \"fashion-mnist/train/\",\n", - " \"example/data/fashion_mnist/train/\",\n", - " bucket=sess.oss_bucket,\n", - ")\n", - "\n", - "\n", - "test_data = upload(\n", - " \"fashion-mnist/test/\",\n", - " \"example/data/fashion_mnist/test/\",\n", - " bucket=sess.oss_bucket,\n", - ")\n", - "\n", - "print(\"train_data\", train_data)\n", - "print(\"test_data\", test_data)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "tags": [ - "keep_output" - ] - }, - "source": [ - "在本地环境中,加载和验证下载的数据集。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install pillow\n", - "\n", - "import gzip\n", - "import os\n", - "import numpy as np\n", - "from PIL import Image\n", - "from IPython import display\n", - "\n", - "\n", - "def load_dataset(data_path):\n", - " image_path = os.path.join(data_path, \"images.gz\")\n", - " label_path = os.path.join(data_path, \"labels.gz\")\n", - "\n", - " with gzip.open(label_path, \"rb\") as f:\n", - " labels = np.frombuffer(f.read(), dtype=np.int8, offset=8)\n", - "\n", - " with gzip.open(image_path, \"rb\") as f:\n", - " images = np.frombuffer(f.read(), dtype=np.int8, 
offset=16).reshape(\n", - " len(labels), 28, 28, 1\n", - " )\n", - "\n", - " return images, labels\n", - "\n", - "\n", - "test_images, test_labels = load_dataset(\"./fashion-mnist/test/\")\n", - "train_images, train_labels = load_dataset(\"./fashion-mnist/test/\")\n", - "\n", - "for arr in test_images[:5]:\n", - " im = Image.fromarray(arr.reshape(28, 28), mode=\"L\").resize((100, 100))\n", - " display.display(im)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step2: 提交训练作业\n", - "\n", - "通过SDK提供的`Estimator`API,用户可以将本地训练作业脚本提交到PAI执行。\n", - "\n", - "### 2.1. 准备训练脚本\n", - "\n", - "以下我们将基于TensorFlow提供的HighLevel Keras 构建一个2层的卷积神经网络训练模型,对于TensorFlow以及Keras API的详细介绍请参见TensorFlow的官方文档: [Basic classification: Classify images of clothing](https://www.tensorflow.org/tutorials/keras/classification)\n", - "\n", - "训练脚本将被提交到PAI执行,在训练脚本的输入输出数据以及超参上需要遵循以下规范:\n", - "\n", - "- 训练作业脚本通过读取本地文件的方式读取挂载到执行环境的数据\n", - "\n", - "输入数据通过 `.fit` API 传递,对应的数据存储路径会被准备到训练作业容器中。执行的训练脚本可以通过环境变量`PAI_INPUT_{CHANNEL_NAME}` 获取输入数据的挂载路径,然后通过读取本地文件的方式拿到输入的数据。\n", - "\n", - "\n", - "- 训练脚本需要将输出的模型保存到指定路径\n", - "\n", - "用户的训练代码必须在训练作业结束之后,将模型写出到 `PAI_OUTPUT_MODEL` 环境变量对应的路径下(默认为 `/ml/output/model/`)。\n", - "\n", - "- 使用输入超参\n", - "\n", - "训练服务预置了一些环境变量,支持用户引用获取超参,输入数据等,其中`PAI_USER_ARGS`是将用户指定的超参以命令行参数的形式拼接起来的字符串。用户的训练脚本可以通过Python argparse库解析输入的超参。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir -p tf_train_src" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile tf_train_src/train.py\n", - "\n", - "import tensorflow as tf\n", - "import argparse\n", - "import gzip\n", - "import os\n", - "import numpy as np\n", - "\n", - "def load_dataset(data_path):\n", - " image_path = os.path.join(data_path, \"images.gz\")\n", - " label_path = os.path.join(data_path, \"labels.gz\")\n", - " with gzip.open(label_path, 
\"rb\") as f:\n", - " labels = np.frombuffer(\n", - " f.read(), dtype=np.int8, offset=8\n", - " )\n", - " with gzip.open(image_path, \"rb\") as f:\n", - " images = np.frombuffer(\n", - " f.read(), dtype=np.int8, offset=16\n", - " ).reshape(len(labels), 28, 28, 1)\n", - " return images, labels\n", - "\n", - "\n", - "def train(batch_size, epochs, train_data, test_data):\n", - "\n", - " # Load dataset from input channel 'train' and 'test'.\n", - " train_images, train_labels = load_dataset(train_data)\n", - " test_images, test_labels = load_dataset(test_data)\n", - "\n", - " # model train\n", - " num_classes = 10\n", - " model = tf.keras.Sequential([\n", - " tf.keras.layers.Conv2D(8, (3, 3), activation=\"relu\", input_shape=(28, 28, 1)),\n", - " tf.keras.layers.MaxPooling2D((2, 2)),\n", - " tf.keras.layers.BatchNormalization(),\n", - " tf.keras.layers.Conv2D(16, (3, 3), activation=\"relu\"),\n", - " tf.keras.layers.MaxPooling2D((2, 2)),\n", - " tf.keras.layers.Dropout(0.3),\n", - " tf.keras.layers.Flatten(),\n", - " tf.keras.layers.Dense(64, activation='relu'),\n", - " tf.keras.layers.Dense(num_classes),\n", - " \n", - " ])\n", - " model.compile(optimizer='adam',\n", - " loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n", - " metrics=['accuracy'])\n", - "\n", - " model.fit(train_images, train_labels, batch_size=batch_size, epochs=epochs, validation_data=(test_images, test_labels), verbose=2)\n", - "\n", - " # save model\n", - "\n", - " model_path = os.environ.get(\"PAI_OUTPUT_MODEL\")\n", - " model.save(model_path)\n", - "\n", - " return model\n", - "\n", - "\n", - "def main():\n", - " parser = argparse.ArgumentParser(description=\"PyTorch MNIST Example\")\n", - " parser.add_argument(\n", - " \"--batch_size\",\n", - " type=int,\n", - " default=64,\n", - " metavar=\"N\",\n", - " help=\"input batch size for training (default: 64)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--epochs\",\n", - " type=int,\n", - " default=14,\n", - " 
metavar=\"N\",\n", - " help=\"number of epochs to train (default: 14)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--train_data\",\n", - " default=os.environ.get(\"PAI_INPUT_TRAIN\"),\n", - " help=\"Path to train data (default: /ml/input/data/train/)\",\n", - " )\n", - " parser.add_argument(\n", - " \"--test_data\",\n", - " default=os.environ.get(\"PAI_INPUT_TEST\"),\n", - " help=\"Path to test data (default: /ml/input/data/test/)\",\n", - " )\n", - "\n", - " args = parser.parse_args()\n", - "\n", - " train(args.batch_size, args.epochs, args.train_data, args.test_data)\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " main()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "用户可以在本地测试对应的训练作业脚本,例如通过类似的以下命令\n", - "\n", - "```shell\n", - "\n", - "python tf_train_src/train.py --batch_size 32 --epochs 10 --train_data ./fashion-mnist/train --test_data ./fashion-mnist/test\n", - "\n", - "```\n", - "\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2.3. 
使用 Estimator 提交训练作业\n", - "\n", - "\n", - "`Estimator` 支持用户将本地的训练脚本,提交到 PAI ,使用云上的资源执行训练作业,他的主要参数包括以下:\n", - "\n", - "- 用户通过 `entry_point` 和 `source_dir` 指定训练脚本:\n", - "\n", - "`source_dir` 目录是本地执行脚本所在的目录,对应的目录会被打包上传到用户的OSS Bucket,然后准备到训练容器的 `/ml/usercode` 目录下。 `entry_point` 是训练作业的启动脚本,支持使用 Python 或是 Shell 文件。\n", - "\n", - "- 通过 `image_uri` 指定作业的训练镜像:\n", - "\n", - "在当前示例中,我们使用PAI提供的 `2.3` 版本的TensorFlow CPU镜像提交训练作业。\n", - "\n", - "\n", - "- 通过 `hyperparameters` 传递的作业使用的超参:\n", - "\n", - "超参会通过命令行 arguments 的方式传递给到训练脚本。 例如以下示例中,对应的训练脚本的启动命令为:\n", - "\n", - "```shell\n", - "\n", - "python train.py --epochs 20 --batch-size 32\n", - "\n", - "```\n", - "\n", - "- 使用 `metric_definitions` 指定需要采集的Metric:\n", - "\n", - "PAI 的训练服务从训练作业输出日志中,以正则的方式捕获用户指定的Metrics信息。用户可以通过作业详情页查看输出日志。\n", - "\n", - "- 使用 `instance_type` 指定作业使用的机器实例类型:\n", - "\n", - "对于提交训练作业的更加详细的介绍,请查看 [文档:提交训练作业](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/user-guide/estimator.html)\n", - "\n", - "在通过 `.fit` API提交训练作业之后,控制台会打印训练作业的控制台详情链接,用户可以通过该链接到控制台查看作业的日志,采集的Metric,机器资源利用率等更多训练作业信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 获取PAI提供的TensorFlow 2.3的CPU镜像\n", - "image_uri = retrieve(\"TensorFlow\", framework_version=\"2.3\").image_uri\n", - "print(image_uri)\n", - "\n", - "# 配置训练作业\n", - "est = Estimator(\n", - " command=\"python train.py $PAI_USER_ARGS\",\n", - " source_dir=\"./tf_train_src/\",\n", - " image_uri=image_uri,\n", - " instance_type=\"ecs.g6.xlarge\",\n", - " instance_count=1,\n", - " hyperparameters={\n", - " \"batch_size\": 32,\n", - " \"epochs\": 20,\n", - " },\n", - " metric_definitions=[\n", - " {\n", - " \"Name\": \"loss\",\n", - " \"Regex\": r\".*loss: ([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " {\n", - " \"Name\": \"accuracy\",\n", - " \"Regex\": r\".*accuracy: 
([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " {\n", - " \"Name\": \"val_loss\",\n", - " \"Regex\": r\".*val_loss: ([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " {\n", - " \"Name\": \"val_accuracy\",\n", - " \"Regex\": r\".*val_accuracy: ([-+]?[0-9]*.?[0-9]+(?:[eE][-+]?[0-9]+)?).*\",\n", - " },\n", - " ],\n", - " base_job_name=\"tf_tutorial_\",\n", - ")\n", - "\n", - "# 提交训练作业\n", - "est.fit(\n", - " {\n", - " \"train\": train_data,\n", - " \"test\": test_data,\n", - " }\n", - ")\n", - "\n", - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在训练结束之后,用户可以通过`est.model_data()` API拿到用户写出到`/ml/output/model`路径下的模型保存到OSS后的路径地址。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step3: 部署推理服务\n", - "\n", - "以下我们将训练产出的模型部署到 PAI 创建在线推理服务,部署推理服务的主要流程包括:\n", - "\n", - "- 通过 `InferenceSpec` 描述如何使用模型构建推理服务。\n", - "\n", - "用户可以选择使用 Processor 模式,或是自定义镜像的模式进行模型部署。这里我们使用了 PAI 提供的预置 TensorFlow Processor部署一个在线服务。\n", - "\n", - "- 通过 `Model.deploy` 方法,配置服务的使用资源,服务名称,等信息,创建推理服务。\n", - "\n", - "- 通过 deploy 方法返回的 `Predictor`,可以向推理服务发送预测请求。\n", - "\n", - "对于部署推理服务的详细介绍,可以见: [文档:部署推理服务](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/user-guide/model.html)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "[Processor](https://help.aliyun.com/document_detail/111029.html) 是 PAI 对于推理服务程序包的抽象描述,他负责加载模型,启动模型推理服务。模型推理服务会暴露 API 支持用户进行调用。\n", - "\n", - "对于 TensorFLow,PAI提供了预置的 [TensorFlow Processor](https://help.aliyun.com/document_detail/468737.html) ,用户可以方便得将获得的 [SavedModel](https://www.tensorflow.org/guide/saved_model) 格式的模型部署到 PAI,创建推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": 
null, - "metadata": { - "pycharm": { - "name": "#%%\n" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from pai.model import Model, InferenceSpec\n", - "from pai.common.utils import random_str\n", - "\n", - "\n", - "m = Model(\n", - " model_data=est.model_data(),\n", - " # 这里使用了 2.3 版本的 TensorFlow Processor。\n", - " # 一般情况下建议用户使用最新的TensorFlow Processor创建服务。\n", - " inference_spec=InferenceSpec(processor=\"tensorflow_cpu_2.3\"),\n", - ")\n", - "\n", - "p = m.deploy(\n", - " service_name=\"tutorial_tf_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - ")\n", - "\n", - "print(p.service_name)\n", - "print(p.service_status)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "`Model.deploy` 返回的 `Predictor` 对象指向新创建的推理服务,可以通过 predictor 获取在线服务的状态,发送在线请求给到推理服务。\n", - "\n", - "使用 TensorFlow Processor的在线服务,会通过一个API暴露推理服务的模型的签名信息,包含了模型的输入输出数据格式,[详情可见 TensorFlow Processor介绍](https://help.aliyun.com/document_detail/468737.html#section-w41-c2x-vsb)。\n", - "\n", - "> 当前仅 TensorFlow Processor 拉起的在线服务支持获取模型签名信息。\n", - "\n", - "通过 SDK 提供的 `predictor.inspect_model_signature` 获取相应的模型签名。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"signature_name\": \"serving_default\",\n", - " \"inputs\": [\n", - " {\n", - " \"name\": \"conv2d_input\",\n", - " \"shape\": [\n", - " -1,\n", - " 28,\n", - " 28,\n", - " 1\n", - " ],\n", - " \"type\": \"DT_FLOAT\"\n", - " }\n", - " ],\n", - " \"outputs\": [\n", - " {\n", - " \"name\": \"dense_1\",\n", - " \"shape\": [\n", - " -1,\n", - " 10\n", - " ],\n", - " \"type\": \"DT_FLOAT\"\n", - " }\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "import json\n", - "\n", - "\n", - "model_signauture_def = p.inspect_model_signature_def()\n", - "\n", - "print(json.dumps(model_signauture_def, indent=4))" - ] - }, - { - 
"attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "通过 `Predictor.predict` 方法,可以向推理服务发送预测请求,拿到模型推理之后的结果。\n", - "\n", - "通过 `inspect_model_signature_def`,我们可以拿到模型的输入签名信息,然后可以使用对应的信息构建我们的请求数据。以上的模型只有一个Input,Name是 conv2d_input,输入数据格式是 (-1, 28, 28, 1),分别为 (BatchSize, Height, Width, ChannelCount), 我们需要将数据reshpe成符合要求的格式,然后发送给到推理服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "keep_output" - ] - }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAJOUlEQVR4nO1Y25IcN3I9JzOBuvR9ZjgUrV1H7P9/lcPeiF2JokTOrbsKmemH6h6OKFtecuUXe85LV0QDOMgLTiYAvOIVr3jFK17xilf8vwf/F1dmIv84EgLn5QCQzEyIKt0dAGB/CAeRfOYRZiSkdphPkX8UCchfWSKZgA0rPsWMf46EFFNGa5G/9nlmJmDrg3yaHr+VZNk2Ves46un+LhDy2RBkZAZg+3daHj5+k7sIEAlQrK4O+3ovpwbkS5Llsx6+M/+B32rJAqnj5vCmG/2UU8aXWyFW252O59W/lSR12F5dXw8j2H+8+4LEuq5ejZqR30SynIgEYKvD9c31sO6G9V+P7deD6na/vhnadPT8FhLgkqzd+nC93wyrvu/mT08v/5duc3273wxxnNo3WXJZh7babDdjNQjz8ZGRCRCZmVb77e3tvi+zXIZ/vbsSEKvDbrdbWYOw2+fqz+GeFPo8hXXj6nBYGY5T0W8hIQQOSF1tD/tNz9Okxbbr7z3aFGIyPz0268Z+NRTEw7HTfyKFtd9eH7Zj9RZFdRiGGtPJtcj88DBr7Us1Rsu+yDdYwnNq2frm7fWmSiyHclh3MRXXIl77RjFRTYqq8KtJiEuFsP27f9l2kilUMxVkRASoKBLe3JucT/7XpzAvmtHvbr/rYo6kmKnEHDGdgk1Vak7zFBAo87lYfX1MVPv9bjPYySOpaibwiBZBUkThcIcITURE4n8m+VzyLsII9Lur24M1P55AFTNTtMwWQGZGpEeEkCJml6D81yTnxbmMuXwtAVm9+/5qmw8xzyyqairhnpEEMjxjbpGgqoWZ6fxV7uJFs7Zv/7Rle/AWJlRTyZjnJIVARkTzSIiohZVSjv8wCZeYJ1i68fp6P0ynU3OgE1NETqcZYgTJ5xpJCkvt+qfI/JLk7KezROdlAhYOcHXYf3cY7TQ9tRSFmaLFdJxpFFBEkEIAhAis9sPT1H5D8gXymSSQAMr27e3bbUmfTg0lqEWj+XRqAkuAIkiSCZAC61frY0T84zFJUGR79ebNrqN7RDChZozW5hbn7gF46THt1rvj3P7bmFxGJnBpP9Ctx+t3bw4DTs1pxJJarc1OEWaGL5rwPF2G3dXpePwNST5zkEDmMmeprsPtzc3NfsU2tWa9p5RimtFaiohyIZGlsAAZIsPh6XT36XcsWcz+3ITANm++v9r1ZZqePFU9WUzFw4NqlIWEWAp7ZgbY7+fHH/X3UjjPGQUAYsXGm8NuXeiteVAokM4EGR4qSgGQwVjkLTO8SXbb+DR+JnkZg/Ph5udmimW122yurjpMp2wtQSpFO0NEZEKUBElkAJGBcBKCstOfB76w5HOreRbbF57Sfvfdm92qNz+2ACkUUavV4B65nI9l
XgCRiXBmqFpfdsPvuutMTEjpNvvr253RfZ5CiipFrdYqyISKmS17SmRmJhLhGUmpOnSFsM9n/LL1y6+ImKpp6frVYTcUuntA1IqSamYqAAtROkN45nJ/SACRIUqJSJZhCCOQzJfN7LOTSulqP3R93/fjIC2bJ02smpJy1nGVSisaLds5S3LxmkJiPrmX1Wa6WPJbV9Xa9WO/Wo/D2BVNzO4JNbNiSogKgUwxEVFeRC+ABIlESGabW9i4PtpzTlHIc3FaAtH3/TiMq7Hvi6K1yMTZS0JSmOkAqUJEnI8gAgEhk1SSECt9n5aLF6lWigohFJLUfhj62teuVjNGRoIKsaKCDKUgMsQ1RdPDI3ORCM+knTVfrWbXdZ3bORpaaz9UFYqYCNWG1dAVUyUzW2QCCkoxAQJCZDpBg2q0uaWoCJEt4lx4s4XWDn1XrNgSj9L1/TB2RUTVVKhlWA118YS3iCRFKKK6JAnlrJrizvk4swhFEMigUKxosKlV1mKqBsBK6fq+7/paVU1VBWL90JmQoJCiSaGIgDzHTxFEJNIbptOkksvVOgIAhIgERUWFpAHo1uMw9tXUSi2mFCbVGB4gSZOSIKmU9MhMUrQAke6Z0eJ0nEsBiczICG/m1ZCpFBEi0wAbD/vV0CmSpasmBCNJzwZQ1cQoBKhCR2SCFDUysjGZ4e4REBEQJDMiIv2cQMwIt8Lx8PZq6ITespSiApxbKACMhIo+n57MyKVjEEoCSRWWqqWrlUiCqg6KiIioMMNba3Yj23d/vrGY2wxPn5RMRJzbB2ozFRUgaSo+NwcR4Q4C1NRq6IewvjdBenVvLWjFSJqmT9PpdLRbO/zpL1f5cP+UHjG3RbEzISRIWYKHBNU0vSU1MyIgBFVK32l6iqmQmZnRmlOLMpOabTqdjkdbd/vrN9fNIgHxeHYLEklkhosIEaC5MSmiaqYiZ73vO0NiCQBApM9ONUmPNIa3eZ5togdURu+mubkHwIylG8lEIC+3f4CiqiqmpdYiQqtdMRWGQ6woCZAZzalKb3Mom6kI7ade93ebbiiZGRGemRFza35BQIQgxErt+q6aaq1dEUq3Wnc5T/PxlGUcq3CRlggKs52mJsy+60q19z021/2u76uJZDoi3ed5nts0t3memydVAIjVflitexMrtRoh/W5f4v6T++TU1UrON5IEiJhMG9m6Wkqx+1Y//L2c1mNfiwqVicjqfiGZIylnkm5Yr3olVeDRchbrTh9/ubt78E3dS2FeuidEZmZIRmRmWkwP78vTZuy7UkqttaiKmdXewyPCPQFdstms9p2mt6nN09PjUVbrOt3fPz4e/baNHTURLQIZ6T5NUxMej4+Pj0+GeHp/+qEWM7M6btZj33Wr9aqvJEnBuVdnQoQU+DQ9Pd59+vThw6dZq/p8mueW/5qbzg3ZTs0j3Ju35qF8+Hh3d3c0xGm+IwmhdpvDYb0aV9urFr0WVTPRRQKQACPaPD0+3H388P79D3//6dHJXJqzT9ub+lCY02lq7q01j0AYn3765e5hMiDPz5EAHo9P63EYt/cf170VM1s0+XKPdW+n09Pj/cdf3n/48cefXxTrv/3bTdsuJO7urUWAaTz+x0+Pc/y6JXoSv+9qHVarroqJqojI5VkuEeFzm07Hp4e7+7uHlxMf/l3/OijQWouI8FiKDeef//YYXzzZ0kxUxNR00VDy5StmIjIivLU2z81fTNT1ujMub4OZl/6WiOnpOOMVr3jFK17xilf838R/ApS1P0sVdtkIAAAAAElFTkSuQmCC", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Ankle boot\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAPD0lEQVR4nKWa2XPkyHGHf5lZBaDRzeY5nPvQzqHDDwrZitCz/2+/2W8KOyTHSrva3dlDs8O5ODz6ABqoqkw/AE02j+aMZDyQjGahPmRm5YkmfNaV39q2w7cXPiq2Mw2LefiMu91nMfzmeKxhPl39aGvTa8j9dPH/hPjMOy8ikm3ubNr2xokSUfcvKrdGTkNbV1VIKcUQY0z/FGSws7W5McqdZKPNIc2O5xBZQvJBThpi2zZNU82np6ez2T8FcduP7t+9szvy4opRzk3dsvfcQ4jJUopqqZ4df/xw8OY9WvuHIERE5PYePX3y8P7+2LFkuecUVTJ/vihqSga2evrx/ZuNcpD746R2HehaiAw3hmU53Lv/8O7tva2hgMQJeaNLt5oaCfLM5+XG3r337w9PPn6MnwtxOw/u3N7f3dneHA/LzEFNI4iJRc4XhaQGsJC6kRvtP54cH779+1+nnw0p958+ffLozmYuQlDVpGoGEWFmIoKZaoqqIGJmYp+NSZvq5M1X1evqcyDE4vYePH3x+P6tAoCFoKqqagBg3Rk2U0tJrQMKxAljtLVZhoPXbdteOQBXIflw4/HzX31xayyAphSDqpqBAFMjoIeYggAYFKbKwuBy9+FvqvH7t4eXo8AVCJe7d5/98sX9ASqYmSY1MwIxyFK/LxRG4I5sloxgREVe7v9K9r5JJ5+EyHD/ydMn9/dQNdEAWyqRYf3x7D+lnmgwNUtg8n7zUTYMB77+JGTz4fOnd4YATA1EIkxMpmoGmMGIwJ0IHdJgABHIksgY4dU4u7znFYjb+eJfnuyQxmAwsM9zJwRtFrHfkohFiFJMSkSmCmImBqFN5Da2t4c52acg24+e3ctTnYKBwS4rAICFeXknO3FMQaN1VBAzCWANnMtHG2XOl6LYVcMPdva3UIWUwERC2lhUMzKBJjBIHAsTACIAJCBiAgyajNmXZVk4tZshcFkBEAxg50jn7fxokka39wpplJ0T5ximSqJkBpdnnGJMKZmBSeCyvCg06c2Qs4s4z2x+evjuh9fh/u+2MgtGvsgYQBs0wWlKcEMBmpRCNHIsBJJ8MAj2ORACAPGZnR79+ONff2h/dS95CCCOkWJTLxIXmQCgJIiLRZsUQswEkmI4bNKFEHYVQszLvxyhOvjmu5ev4m5lgBBB6+nhSV03tnXnVplHNAdVMLD4TFgIAGXleFw3n7AJkQCAAcTQk5d/+u5ook00wAusnbz88ruqjfzsdxtbOfDqL19PBnu3tnc3ck5qKpRtbG/N5p+AdKfGAIBh04NvXyYlNQBOtGnf/OU//3tu5k93Hu4DmH75H+93Xrx4spGNaNF0kJ3tk+NPSrJM4wSYxhDUEBSA+Gp29NPLH35qALn35t14H/Tux6/f3OJieJczxAYG+OHmZuluhlAfzEFERjR68II+HqcQFcBw9vNX3/9o9wwJ2cevTncdf3sYUB++3n6SACaYwZXj8eBTEHRBgZhIhXd+HXf+Us9TMgAuvPyvH9LW/cLaxh//+ZuNwr2ZjJRmb/ZO2z4JmBQbG5+CdN5sBiY20NYzX5z8NO+da/L9//x9796LDavmJ4evQj7ImtlQ6vrwcBoAIpiZDDZGhdwIYe96u3e2KbYWhxsCEKGtmlc/v61Pq7a1tpkfv69d7gDVpoqTOvXOBVcMh/mNEHK5708wMRG6MMyACGZ/f/O/Bw3qt4NBqhenx6cNCZNAJ00zbTpRzUiKsrwZgqyLGwYiZkJfKoCdYPLdl1+/JehhcrENMSrFRsmzNQhBrbMJ4PJykPMNEGKfe15VF4lzjkHMNn/91csjhbNgKVE2QJjNgs8G4maDYdYRYOCs6B50HYSdz9yZJAQY+yL3TJKXFE8O3pw0GD9+OOJk4Dj5+efp5uOH5eQYz/dcQgLMjH2e+xslcd477gzIRDAjnxeewXkJmx0d18Deb39/Owd
SrF//OWX3//D73dN3s1v3s4BEBAW5vHvQ9ZJkywVETIbuuYQshXw+m9eA3Hr+r49HhJTm37cfcO/Xf7g7Ozgq9rJeEtBSG2shF0U1Uyaf507rN38b/PBhIXlWjqmaGVM+Gta3d6Y5QkCxUeSd/KZw/Al1scu7I0xERGaqDm6QO7z/Y8y+/7Ed7+8W5Q+n2wXxg2e3aTDKq29pRyDbBQk0mYKIs+JmSGeBpSApGSB54XH4x++lOpXd54/9yTfvWOB+9++/hR8Npn/6ejje2380MgcNXTHzacgFdakZOCs3kF6/5rzcfvj8ufzt628WgC9/+cWw3N2Z/3wsd57xTiQBw6BM5G62CS0N38VIAsyIR/sP2ib4zVt3f/HiC2kPjipQfmdzUO49aRc/f4B/KN5xvx60jH9rJZHuCJsZiECsKkrjh6d1RLFz996DX9xlSjsLcvnzZ7uFuLz+Foj5eDxgwKiv6ugiY40kZgYQmGGqGD+2Gn60e+f23v4WZ+WTwFmxe2fbDTaKj3fyZri1tzUQYBkiz/Pe9ZK4LDu3CTHDNGL8IKspH23t747HJSBbibN8UGYkuT588nTy+N7eeMDnjCvXZcN3kE4SJqjGyONHuy18UW6MCk7EpRg5L2jZ0fDBv2XV/d882C09ALpcBK+B5LkXgpkZCCRkIdB4ZAoWFramJaMcxKTR2Ody5w9P42h3e1AIDNRVnp+CuCz3fNaVQFOMkfM8A2ApxJSU2DGDLIUkSm576yyWqhGrdb0Lsd4Eyc4hGtqgCnadmazTI/VH20xToKyzQ0qmAAMJBpBkeaPrIOSyzPU5npCqaStZJtYwDKopKQhIRkamxNBWNWdYG2IyuMwJkhlAviijnWvuaj6R5enSdnq4GGwPRJvUJT4CESyCDCBisxDbhhFjDFFl4IQSKQDyg3Ke1kFIfFdIMANpcfKh2ixJLDVqgCybeIMRmAiqysaWYmjqIKkoWbqk6vLCxXN9XfV4YYBFKen88PW83WEBUydFV/iRrTgEAcTMcTqDjsa9r7NkuV/p5i/neOeEAHZGqZ18eDXDQxJy0kE6SYj73QESZiFj09l75R1Dd2hIfJY1aR1EnBMG2KmlZvbx3XS0IGEn1MkCWvE4AzFEhEiSzT7EotZl1GJxbiVIXgMhACIxtfVsMp2H9dECXZQiJk7VcbPdWF8VGZh5JdpfLSS6VkZiapvFogkJfUd/ef/O6QACMWt13ExaA8gMlm5sTFnOINA2xG48BFVjQ1+JrwYOIoKBmHUxreftUpKU9MKyiymMxHvpvAApRO1njmZ27URueRczYl0t0vIZrG9A1qrLCZQJsJRUcuSOPsUAs5eLOmLn1xuexXuBAoAlpWyUlRnBVAEsJ0RnD4ReRmLy3vH5psTOZ15uOl1sZgTTGM0Pi1FG0H5uc91lZmBybgnp3NVlN0C6o2cAkNpFoAIDf32KuEABsWnXjC0VcpMkfcUBWKznC+Qy8NTF92u9hQBTNoJpaKPRMr2L79ucayU5V0qoZpVm+cDTeSNhq6juDEJNDbDYdi7V9xtZ5tcaHme7WKhntfpB4W5SF8HsDNJGXdpElk7dq++yJNRtaqGe1ckVueumTteave/GDdCwqOtFo0bM3URWVnbmy3d1zTgQq1mVXJH35TdWg/05oWdA23o+r+qkYAZAIqtV5FVJYDDA4qJaaC/J+dZ0afFSEovtoq7rqJAlZGXpJQiA3uNSaCPlg9xd2fva5QRouwiKbqjHwmvVZcuROWCqyMpR6WCKKyZZima94dnlGWvbpm5iQHyh5r4kiSU9fz0h+XA08NBuCnwx+i5r+K4OBOflMLPQRmPqplhC646w6QqDOCuHuZjqNerqD7ZZNxeWctwM0C4K17UbIrI2aWEl3hrI5bknpWsy1tn6/ocf7aQhNYs27+4UL2udsQvq/VTNSHzmoFBbDQXWG5psmToA+M3bsilN3Qz76aW/yeNtuSERs/OOTaH9XP4aQawfXWW
b+zLgRdUoACN23q2Pwp0ghD6SOiEk3KAu9I/khttKWs37OohklXENhFgIICnKrHBMqrCr3rRS3RERIFlZtc0k29E+q/D69Gtm6KZD8OWGL1zXAC8nYGf7nyWEpSLZF1lTh2yRzpR4oyToJHHFyBWOYf1bpbXa6qi+KOZtM2iWktxQSCw7UgCSD1zedcKXkkj/+7y2sAQUG4t63iyLbCK5SV3a13HkitIVmRD6gImly3fp9hxCiC1hsIO2srOXtszrDW+my1E+i3deZLVGWfllgKKr6Q0pkBWbNjte9FMcEAuvDSvde7e+kGhSNLoaG1d5nSSmBMlysRCiLSXhG9WlKTnAqg+vs8G+CbOdhdyz3fsP+jmHCKXU1tPjozSPWE6ubzC8ppQSAD199W1RPlQvfObvPcTOjjAxzIydp6adHr0/eNdMAs5L8XWQLhAbQadvvi/vVAksvIyCFxVHy53IZWjb6dGHt+9sFjpJus5vDYSYyNQIGuaTVLXGzizpldbhTF1dmQrRtp5NJ8OFAjCIz7K1sYvFZ0Jmy8EwC4kATTCs2OWcQ1BVlwGDjC00TZs6lbrBqMzW18LeC8EAY59nov15ToaLI6yzsKJJuxZSY4qxc0YlPxgNVoZzVwyvZsuOTOuTQyudVpNaaaVoX+GYJvNhSybHJ5N5m5Yp78bMmJr5fJQTASnFpn715XRz4HQxay5DlkNzswQ3Gsn83cHB21kiqDIYcVEtwroWO8xPjkdDYiDFUE3a4y8Hni200a4pWLqAo+A8p3Z2Ovk4IaEUMwi1k6OTel2LHaYft7e2iYAUQ/3hw0+euetL119GzLAYU7QNx6qAoJkcndbrJhJhfnJaJSbATMN87RdRrr+yrnwEszZVvaKuizkvzk9PKvMAnODiW8/PuEiccx5wGduFL5dcglQnk1odAMdM15WwNzFY2DnpnhC28gWUS5B6Nq3iMm/cWAFfIwd36QiwFEO0dadL22o2n88z0nkd+h7wYoF67fa9Z2pbTU9PHGEyOZ3M23WG19DUs9PjIYfjaW+5NV3vJQhgZrE6/fh2u5R0+v7Dx8liHcRSaKrJqbnFyWwRzyFXQvAViAGWqsnRh51NF48Oj05mzVqIpdAu6kKbugm6VNcntHV+sy6q2cT5MJ/Pu++KXA/RLmtRTEn/0RMMIMU2BAohhBhXPPj/ALU7R99e9smFAAAAAElFTkSuQmCC", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Pullover\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAJSklEQVR4nK1ayZIctxF9mQBq6W2Gm0SHJdkK++KwT/7/k+86+ORwhBQObSRnOJzuZld1LQAyfUB1D0krqtpTxNxmweuXiXz5kBjCyCJAAbu6fvHHv3y7fH+zpeWK6vd1NLlzmaMYef188e67f/xrbBfAjv8YAMjm5SLPXF4sqMwpehGTOZdZipGKzGVZRjoXxCyefvni2VVpJaeyoEWxFJM5mxmKEaurImw26zbKCM44CEEB9+TrP//hmy+K/kmLzKFtGhjnjGPEgMXCZS9fvt4d+zCLiXv2p79988VVLiGCWX3Xw1rLFghRCkfLN1/dQOI8EHv9+2+/ui5Ovxl9IGMMERCi5Az3/MWL+tiM7TANYsrrZ1clQ6EgAAZEUCVFSoQr15tlzo8FSbmkbLnOuZcYIpFhqEJVoYqorEuoLZZl9miQhMKuKC167ztPxjERNIqIKgRGMyecl2VGjwYZfifLGRJDCKRkiKAxDiAaFWTzsnSPZ5IWWQtAySgxE1RVASKAhJCKNbczmSgRAyC2BsxQUQUxEWmMzADZvMjtTCbMzABABiCCQBVExKRQggLs8myUydgHSIusYQBDjAj4aDuNUchOgEwz4cwyoHISJyJWRfqCSoxBbTYXxORuYKKJBhEGBAU0mACTZWYOCFFRZgyIyG/+XCVG2MzNSrxxy2VuU0qYAYWoQEEDJ5UoZPN8DhOy5XpdGkBUiYgUqqJKSG2TQBqVbOZm5cSU61VhARFh4kFoaDhhRABUyVjLc0BssV4VrBCRYWtmHWol4dDY9heCLFarnIVCiKQggJhJQhgONIFYSEeb73ROXLleFSwcRSSBWMOiEUO1kzGqMYQ4hjLNZLMuTNJEYNAsJeZTabIRxL6fA0JusdksjCoxkwoZQxBRGBZRTSBRfduF3y6jy5i45dXVwkDJglSMceR7D2tZfIxQENsgXdP6mUzWpVGQSRkx2u9r3mxygQoAYmbp29bPYWIX63VhkocgYkP9/Y932dfFkqMHACImDV0f5jCx5WqVs4ooyFhjtP3lu/+s/v7smVoiIiIGxM9LPGy+XOQsKqLGWkNS/fjdP5+t/0rkEgiBNPrxIzzVtDgvC0cDE8PSHd788O/vb1qiQUgIUJFZxQi2zhmoiBJbi77a7fdo/OAkLlxTTIiNIYiqKlur7W5bCbKhe6iqKoiYR/VxGsQYZqiKgq2R+t1dJSgyBkBESQmYkzV+NAgb5xgqCjBTrO5udx0MA8Cp+kFseHSfSRCXZ4aQwgK/v3l91wzpcM5AREFkZ/YTdpljUkmZ7nZvXt0dwaQAnHpIFIBnM7GZZUpaCPj3tze7HhAvgLGc3BimGtd04i2S+SEG/OHubQ1oXx+6oKe9NcbxrjUBQsQmeUZiBkJ1/64DtN3fH30UEDFBY+j9LFlJZluHXh6O+10ApN3fO0MESreVvhsHmfTCp3AzMxD7phdA+vowdBAiQIL3cWyPSZtKJ4kiApDuuNo3xy4qMRERVMKEQE6C6OkDE06XSGjo2i4miSRA4tm9PBJkWOkkDWoroWv7gJMQawxxrDH+PyAS49AANfRdLw86fDYzc0AUBEZE06RWrrHvgxCBzpuPVuNlTJSIKUp97B/CNTC5pOIvZAJiDVIfewEA6eqq7kfT8NGavjMCEAUzxaaqOwGAWG/vdsc4HqMP1mTFA4CKYaPheBhAwuHt6nd1OFv6zxMuBTOF4ylGsb5fbY8RPKBMHK7LwgWAGKE9DjmJ3WF76M6XE506wtPaxcOVV0N3HNyo+Pp97QFmIkrFOIpyUbiSiQtd0ybzrkGrJhCYOd1N47yKV40JJQlWkl5VaTpJSk/pmj1Lu8T3kcCGoaE9Nmfznj77oMJTAjmRE419EwTMROLb40fXED1lXsJMgYxd46FsGBqD/7QBpmhNhmvqdInvfFTmdO99cFcfn6aZbkWi91FBIHb
FojhPAdONcbCpxoyb4SkQDb4PChBMtlyv8k9AkoM01o7OVqZAYt+2XogAyhbrh/Hvh+Ga7epDU1WdEEFgF6tPZ8yfR7vU1+8PjTCrqF1sHsI1jIwNDzetWXXi692+ESaN4hbrVWE+/mtmAkSijHKZCpevdrtjZIbAlpv1p+FKbn6ukeir7a4ORCrInGwWD0xOE+gLpH4CRPtquzsGIigyNlefhOv07DWR+Ckmvtpu6wBSUZOZ4uFxQSUp5MTfA5jOSX/Y7qpeT8f04SOrRGDCb53WJeGqeoGqBO66s3tXjUFJB4yZRsLX+13VRVUVTx9KvUo4PQ3h09n6p2tSVtqqOvZRMMyZz/qh8oHsz1RhSFc3fZA0eLDZQ+pVdRjg0tRIdariAd+0nQ8KNsZmxTLnD8w8ziCj8Zr2XbHrQxAlNmyzclEOoq4qZy2ZyQSARhkiwsbmZeFSPepwUVQNMuVTL3CQZFzmDAHExhXF8IoRfdcFJdXoo/I4ygUgbIw1zESU3rMTE+nbzoMgsQs693QBiKFPISGbL1elI+A0eKRpZ3cZSGz3232nhkGmXG+WA0ianROgEsIMB5kkNlZvX2FTsAHs4urJfZY67sCACDJ37AEgHG5fWc4MQc3yydO3+XAxSk8ERKT/6/o+XpeEq767uW+ECWpXz19cF0OWz9vKZ7jHx+ru9vpLNVB11y+bN4skimzSizZU4pycpH9fiPW72+eNGojajbTPyjPIufnOnAsDkGZ7+7KOgIpZ4LAZcvLhzhOtazwnRESQdn9zd+hFRcDWmpSM2B2bXogJYOvGbeoYEyKAFNId9O59F5yIIsYYIwD19X5f5GwIZLJizlN5qggf2/tD61lUNISYem5oDtW1GKb0VO7mvserhLrpvAMTrMuLIogi+r6PxAQ2NsvnMNFTNUTvA6wB5c3zF89RB4CMdS4DsqIsy8c/+n94LEPXS85A9vT5l190bVCwzZwjAHlRTORk/HSd/ad2VdUEACBXrnIGRIYjANWJnnXp5K7b32zCVaF9f//zu6NXwB/e3S5yEOSw3R+Oj384e4hXe/cTVU9zv9++/eH7mypC23c/M6Sx6ve/vL7dHsdmtpcy2f5KfZ03b169+fXXmy4A3fYVOxty6bavb+72zWcACcd96Tiv3vz0+uau0vSd5W5fFNLs9u/rZs5j5mlJ6NquQ3usDnU3zCH7tut7I33Xez+uwv8FU8faPheawW4AAAAASUVORK5CYII=", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Trouser\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAI+ElEQVR4nK2ZW3MbyZGFT2ZWdQMgRWlW0tgzXu+sH/zg//+LdsPrcWhmJBJAX6oqL/vQoIb2A7pDIJ/IQER9PFWZJy8grPxQ991Pf/mY6mjmDodrs4AADhZBaD3/9unp+hlpjSG53+0PkqBmDsC1aQQDAIvAJLrc5XYTJO3evHv/8ftcxqbqxBKtNbUAs7BwtDmj1hhV/Zsh1D98/NNf/vpjLudaq0nXUSulzi1S34vAptNTn7u7p+O5fjtk992P//3Xv/1nN5/LPGu6O0ibpnEYrb+/y+z19PnXw92bt79i/nYIujfv//DDDz/mep6nSfOb+9SmcTiedf/2bYbNTxLS7Xp9kiuHrEGk3+87DoBYMichkrzzZna4u0veNPeH5lF74W+HEKfMOp66OtemHlMwTJ0EIgRvzbjrSyHQtVPWQzixDo+dVTUNdiWOsOBggmutipQlzDxugHDuUtTZ4QABcAKCRDgLES3/RGitdgtEur4jLZRSMBlJJoQTJyRhYknC5G08jeVKmqxfV+4yac05UwtQzuwK4kTCBE4pCaycjqfZboEQU5gahMOcWRgEMIjhBEmJ4WU4nm9S4lqreoCYmQgR7u4BorC2SGG08TzUWyA2j1M5sDARCOFKZhZB5NVYJOUk0PF0I6SMYzUAjnCHG9w8iODWpBMWRrR5nG6KrtBWSy0lETdVInAQEXOYuXFGmLVa6m1WDwqvY+YwsaYixMQCoghXyg6t81z0WmhtgnC0kVxrDvMsmYmCCGYwtIYyDWNZYayHMFO00U1LhtOOc2ImYrCGOktM47C82W1KKBSmbUoRfAgRIuJMXtGaqZ9OU2lBdO3d1yBBxDCrZUwULg+REiJJyjajTRPrcByKgcVwBbOqBAhzR0SYp1l2OVImyilFnaq1aZyqEfNNLuzWGsK1VWuecf/QK9J9v0/sdZxrLWWu5tfLyXoyTufu0InrVJrnfD6nKPGODrvQeZxqra2qrpSTVUgbjjnvcwx1qt6N44DpS/3Qv++8zdNUW9XatNnVJ9ngXcP+rXTG1pqjzHM8/vM8/3H2sDKX1qqp2dL2fTsk3BbXZUaEm3IZhrEGUbi11tTMHbeFMEiSwNS526ExMwAWEWYmRLiZuxPLbdFFKSdSDdmBmLIwp9x1OQkTM8LDHSQiuEZZt5XlLE7ZgS4xIWK5PibC8t5Et4VwhHsQg4lTcJ8TaRnH6mBmYSYiIkTETW+y1PdkBAinfZ+iDU/3k4JERFgMi7arlPWMVwsWwINS12fRNjy9GRXEIizMIET4dSXXWtgLxRyEcA8iIng5H4+TgpmZFw3hfmueuHsgXJVIzE3n89PDZCQijHD38LUnWbX6cLeIcDMFWFXbfD6eJiMWYQozN18Tsm714e4LBWxqpnUap2pLbC/J6DcqweXWIyIWWaoKs6Xdg5vFWmhtgTyLWYB+8UK3ICaYGUC4FRLPUpa8djUHGO4OIrgZvYYSX2z4xV9YvHmBKIWuGP2Whrup+qKDGeHmgWUtQQS4hs3XJ6BNkFo1AAKYmeDmgXBTj6UB1/ZKEAcREV1SHIsNLIGtrdZ2vcKv24q3UjSIL5DfLT3CzczM1jJxXUlYnYtCwKClIhIAMLO7amvqQXy9mKxD4K1UBYMIfKm7AIgZYaqtWdA6Zb1otbasoPDs7vT8ibstGX+rEsDU3M1MUxCn1CW5WABRhKlHuzlPgHAz9dZUA5xz1zHCzYKWiFYr7TUgz2EUxJKSLM/hYCGEqV3dp22EwC8IXAokEFpmChYhYMOTbIOoLk0kwVDVEW0ak7HIkj03hzCW+/IAMVGolubwOg6dLREtJDeHMC6tFziYKcyqBqJO54OSCL8aBBGglCMxwtwc8DoO5JCUcyZXeQ0IEXHqIzMizD0QdRyEg1LqOrbGawa4AUIglpQiJYabGuB1OOUOIkkSsH5dq80dABZhREj
eZbS5OrycT5NL5qXzW42uDRBiEbS5eH+3p3mcFV5Opwl9F/PpPOtKj7pRieRMOhfaPxxoPk8Kn09PM/U9lfNQVvN9m5LU73eZZffw7g7jcdTluqLbdUJLJ7lywoaH5/7+nVaT7//4H4zzl1ER5XwcVfYP778fhef52nZ7K2T39iOkTz/99GB0+nUwRD2dRpX84c8xuB6n/BqQh/cp79Offzx8jtMXM0Qbh6nx/sN/5WEe+CldH3435UnqD31QfnefWecCAFZLC969/YjzeGyHV1BCRNwdLGXTaW4ALkUr9fHOd4NMh7Ty8tsa7qBuz5jKcdKvaJaO7jVlOr+GEkQEp55Qhq+bWSJiSXxQEbvr1xxyCwREzE4Ms+eGlIiIRfpdWJfXbmujC4uEgOl5KUDMzExMklJalgY3Q8BMxLGcfMHyc+OK1RFom3ctp9LL5cbyO4kQ1jZqGyGXZQq96EyW8YqIYbWu7Z63QJYaT0v7c5kdwz0iiClameuto8MzBcQUX+e6ZYUQIAotpb2CEoRZgOgl5CJpmU3XBq1N1+WmBhaEPxfB5ychbGoht4SwmzmYFkUXxjKrghBX96jbIQCIGC92Z8/rg41KtpRfyX2XXtbY8OcOnH5fV9wIybv9LtPLVaObusciZJ2xrWjlvuOvDw0gQk0vIey+Hl0bXfhfPGVZGl6uy9v6OLc5hF/aSsDaMrIQrLXXmLRC61yZOCL8Eq9hrV0SVGvRV7CVqOMwKb7uPADAm5p/tZXXmBmnx9/ucpagFxmhqmoOeJvn2784A2z49Z+Hw73Ey6fX1lpzuNUNkA3XZcNvv3yZQoReJLdqU7WA1Xm++g32RkjU8Tzqv025fnHIeKXrAiI45cTpZf9OkrtMuxR1fpV6QiySMvhfGJy6ntCJ11epjOHamhm81fa7g0S4A65lXleyJYTHL58+/PK+1k+fvoxfz9Ph6XPB9Mvnp/E18kSPP2P/4e5h/Mf//uPp+Yv3mB9/Tgcf/ufnz8PqDnJLnpzV3vzw3XD+v79/Oj033FGPn3jvx79/epxWvX7TAqfJl8cnHL88HuevN6PTaV/s8fH0e6N/CyQQZS61llJeuHpYq2SltHUG/h+njzIeBCxmbgAAAABJRU5ErkJggg==", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Trouser\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAAAAABVicqIAAAQGklEQVR4nJVaaY8cR3J9LyKPquo5SIqSvDBk+P//Gn83YNi70mq1kkjN0d1VlRkR/lDdQ4ozTXILIIGZ6cqXcb24mnh6SCAC4PZj4OPn1ZtdUVEhTx9LSaIt+3f/WPDFJ335IwAw7HZTFhEhSQYATQylT1drfPHtiyCnV1VVRfTm7TejepDC7R9EIqzP43CzWO/Wu/u/DHK+Xpp2Qx2GN99/M9q8BIVUUU2MsIjoy/5wPD4+Pj7sj18lSQBngzxpQKfXb252Vzff/eVtne8fDUJVzSWLtTVEGN4Pf/z622//1Nj/y5KAoDCNr95++2p3ffvtX96W4zD1IFNKuRRpyxKSNCXMv+2GTE11WT3iJQu9CBIA8m4suYw3r1/tckE/PBSXqh4QVaFH7+b0BKrk8cby7f64zsf7++MLpkl/PptPuhrefns9DsM0Dtr7o813Q0nCaJEAb+F9aQ5ILsmOXt+8crN++P2v/7d+AeRPz+7bH97sxpqVbX9Y93d1vL6Z0FsEHG7NVjMz5ly4dr0exiSx/5mPh0f7OhBJqX7z5s3rqSaFMbzbMs/OSBRIuPfevFlfVq8j1D2Yx5plsvfv27vH+VO7PAMJAPnm9vq7t7uM1hHW1iDMDL5M05Bg3axbkD4/9ClRzVZjDDX78H2bfvzb/DlJeLZHefPvb25vSszmHu4RIm7m6+E2X0999u4OSmDdL3GjqbVjzIcyjIlvx9dl/3v/GnXV2++/GxOsrd0iIKQQPi+L3DJBAJCBcOvdggxbra1l7UO9vtkdfhofvghC6rC7uUnWrHtEOEBA4Wuzw7z2kAw3631pHNKg1roDEW7dIpV4/fbbtKz9AgiJAEit4zQWLLMHlM4Ih6Ri3tpyPNSkNXtb23Jc063U3NoaSlVlmFno7tsfhvf3HQDPzPFMEkoep7Gk1mZPScTdzIMpe5v7enjMU80wCbaj16lKrNZCVZMq3Hrz4e1RY/4TxzxXl5ZxrFng5tyYHRGgQAX9cCdSqxg6vYeOoy+tmVNUVYgwR7l5uz7kizbZGDLVccgEQLiBAMmAW0CzP0ZPN2NuvqoQmnKzZXWKSEpCQTh0unkY0yWQ2GySx92Y4EHxMBchRBAGILPtD8v0b0NdlpRUg4S3dQnJQU0KYRik7q4Gfbr0BUnGaUywoIg7AgKQsAAT7bDY2xUqBCgOt95bIyGakoISDhn61agX1bUZPo9XU4YHKBGI2Ng7nEyyejvu94++HOfugDcuaw/RlFREACKYhnSYLttkAym766mEGSQisCVAAkERzwo7/vHr2A6HoyHacW2rU3MpWYUgECzTNO8u2uQMMl3tMswhAYd7BAAGgyIpZy5/DIOvy9I8mktvkFyHkgQQEmDayX76EkgadoNGBKikwzfFIQgilQLfvy/w3tYWEHGH5lozAaoyApJyzfJ5EGgdh+jmIAQw704FALpThoi03mVhtGUNUSUllTqIWaRc2Hts0fslkDLU3gMgSNC7CYKgu4kMOaJ1zRnrvCBVEWqpwxBuRJ2wuPWlzS0YnwOh5pJDGMKI8PBuSpAIA7Qy1rmJhbRlRk1ZJOVSsi9GlhGxdFu5GrV/BkRSUlWhCOFm4Wa23SoiAiLoCI9wN4MDknJJyu12NVbCnaF1PHq8GIwAqCUpIQpJsMXcukWA3FDcAKRg0hPFipZaE9yCknJxZURQ6u4aS78AQqaaGQFhKtJtNvMIbEU2EW5OJJGUQkRAES21KsyCmlJyIRDUen1r/qFu/QQklQ2ETFVX3Rx4q+TBLe4hIiouIhBStGSFB0REJLZPSp2uj8t6wSaahzHTzEHVZBIOSUgiJCAUkNy8DpIKErGRfFBEGO6gbNQyDomXQOo0ZlgPP+W6QFKoCoUECdn6hnAwB5UIUkUiRCWshVMlAlq/AFLY+5amSFIIiFKUPEdOBFwoiRuFkkKGQuDdnUJ4pDr
WfAlEyjhmeJdQgKIphSO4+QQABOARVFAhGz1vL6qoRg8H4Ait0/gRtXwKUmumWTAiwFS7dPcIBzeODWxlmGjQwyM83EkyS8p0Nw83hA7TcFmSPAyJbqCHO/PgXFo4Ik7J3t3cIyUqjB6BMDMIc8op0bt5GOA67MbPgNSaGQ5xD0iuHtY3JVEAhJuZQyCyAQPhRggkJYW7uyHgOu6mclFdeRiyhEeIByQVt4VbRxEBbnkySAoj3INCuPuZDSPcegg8Tf2qXgaptQAOowUVFqvgzCoMYuMvVTKsuYgI3EQQbiAR3ltAWCquPwuSzcNDHKqhIoiAgBsGAJKqIgxrkVQF4R7wDqUIvDeHpjTIrly0ieacwsNcQ7NYX5sBWwPmpwpEKCJPpLyBeLeuTCoIc0C0oF4MRmpK6t7NO8sYh8PdYfHTDIIuICkQFUYEQBHZynt6d2UtIggH0tZYXgCBiKoJvCcZptbvfl+CAgYiyBBIBCmC8IBARYWICO/zqmknSYkIbh6PCyAU1VO6kjKgP7yzUhWBiEAAFAS5HYxtNgEA8H446mAqSkR4gCIXQU5/jPCAJqyPD8Eqm+Oe+CyAkyd//F5f9nLtAiHCsfXhFyXhZtMtdcZ6eJRRJE4zgAgq4Ob02BLl9nsyfDnIEkIlwuFMki6CgCIbPUUgbNnvs1ENZ5qkkt0NuqnP3Tf1h69Hri4qZIQ7N2tdkuRje9k6HwbjuQNDBJgY7SwX4iwJfT1wcVXdmlsqkl4KRvIphQLh7XikBbceRYiAKI3hJ5uEbxWAINYjlpCkgi1b8iIIKZQ4TecQ1pYl9QC3kIiIoFK2+R4Q3NpWCOntgNU1bQlURKmqL4OIUHguHMK9t7U6CICiDPOA8IOu40mSiPUYq4vo9rqIaMpPVeSfhgVpK9POZ7j7qagNUGFnrztd48kmAKzNsdhW18dWraeczpXXBxBCS9Ytn57OgGyl5EbvHmFm4qCcIl3OGG7r7Otmvgg3j2AqNSw+BaGW/JEk4UGKylP3izDpXR0iqmIb/caWa6wtttqpJ4OZi+RaW2ya+ChkTiARgY21u5296lSpnhRIkZN8Zw8G3Mys+xMfgLk+EfFHIJpyTkq4h6iir2trBqoKnkrVjZc0pS2R+HahbeQa1ttTZ0JJpZyj/iN1Sc5JT/k0Zfb5uLTuklLfitRtIhwQJNVTAnaQoslTLl2steYBQghoLiUvz7zr6YIWKhl9Ph6XHpAnSZ7Ms026z3oR1ZRzBvqizc4lIF+UBCdHMnNoKdL8MDfzOBPNE68DQPBkCwCQhKSMPh+kQYQRpyu/AIKIMOu9G/NQtc2Ph6X1c/vLc9Ea4Wa2JS85dT/M4uvx8YENW2W0Ge4ct38CcbPee7ecx6Lt+HhYzU4dyhkGCHczC4dCtrm9JEm0dny812Bu0v1Jqc9BSISZeTAPOZbjYbH48LfTxSPcaYag4PQLClXo6+GxZKYk2ynQF0C4OasHKCnnZutq2AYaxEbQQjjcXTaD4xwtoKScsByGKaecBH2O2aj6XBLVpIxzHHRrHSmXnJNww1A7hePJqXjyMXdKHgbphzEXzTlx3S/7lUm3bP2xJFpq0bNfpMW6M0etOT25sCBsy5sAKaAgwt1dWKYl23ycICkl9oMcTHJ6JomkYRqyQBS5FBFvJkMa89MkkSIOxzmmRTZncXen1N1aYl2ag6KITk/DWNIzkOH61c2gIklrqRTvUa7jqsDOdwfpp1rotKnZINwow1VP6n1ZaJtz16tX87v8zPDD7aubKYlm1GGARo96w6scvVt82BacY5+nmsHdzUOGa2OO3lb1oKako7fl11OP8pEkZbyaSlJQhqE6+9J1ypOaP3X9PKvt1Nu5k+buplJ31tCaQcQ0qSb4bhqeqQtMKeeUgHR9NTWuh2OUkmBwnE7dovFUAbi7+abAgJRp9aXn8uaNPeyR9ZTunqkLQUm1asrXr66OXB4eUhJag3MLXpKCrS1GuHXrTm60I2W
oj3sfr/7z39dffmkpSw8ze5Z+w9raKrSWMg5l6Ye7x2lHeuNW6gS2WhinKtO6dRP1QERIHmrs19v6/X/M7d7pth4Px/N45QOIr/t7sbUNdUBqD3d3D3OVnGAMoQq3dEvBmSfjXGm6Gcq0ZDs87o/z4fHhIdQPD+/f79unktj+N/tjGGqtdXc7Hv/3l/tgnYpEhCCJwK1h22aeN6cb00RfyXobO51//e/Xfz/+9Pe905bj4927+VNJ7MH+KCmlnOp0u+s//v2h6rjLvQUpSSW8NSQRIUUVIhRsGa3PUceRt7X987/e36x3d3OEWVuX/eG5JAchSNU8Xe/47tdDTsNO5hYUVSLcmihVSBUBuQ2/Bd5X5lJxPeB+/6P21rq7n/T5qeED55XX436Ux70zlcoGQBTRvTsoKQuhylORJ7LVWdDBpqnGw5197XZuQePskFwKBCEi6OHNqbkWASineTHlVA9TNA9X1/ftpTXjhRVgdxhYck5BgEK3HgbNpRSJ4DZVOY8kNjaTevVq3+3Z1uwiiBnAYahZ/NRGWA+olnSuJp4S/1Z0ww356tXj4fDScS+AbPrW4fpm1K3VcVo35lqSWmAr9E49AsO6uvd1MZleP97p8/OegxAgHcBw8/r1JO4OuIV5SNlV9tUjoClvdK8KN6pZnxnYvdm/Sx/d8zOSkGBA6u03ryaxHoB7eFDLMFpfm4ckpG0RtXWY7tZnAOOrh22nwU+OvLT7ZdldXxWYObjtTlJWeG9rENvAABCRcEa4974K6243yEuHPQfZJGUar6Zkp54DmosWn9d57cwpp41XqMlhQribQVOt6atA4vxfGqZB3S0kHEzjJN7WZW6etZSslDN14TT7CqZzfxMfNryX1HVe0k2j2nnrJLlwWQ9rc0pKKek5Sz71LQHRnPUr1bX5BvN0fcO1tUCEo7cZ8/5g2Crcc7na6b0zhSSH5CJjTZ841iUQnkBuX8WyzA63cGCJ42GRWkrS8yrNmrgvjXmQFNQy5qkmsZOIH+nrJRcmAIjmSlG0tUvAl9WWuZdUB4mTmiLCYK2R3QNU1dA/tc+fVRcAoB0eSobWjhIR7gbNqU5XFd1VE3upPeVk0iTXWrKiYX6cn3934RIIA37/E759fTNOg0MQbZ1ba1F2V8nXrkOlMlvOmQFJmsuQcXz//sffzl9diK8BsT/+5913P/yQp7HmwlgO+7UbtFa2ZZU6SEkjcsk5J2WAEvPdLz//9PN+o/rgF1wYAcAf97/fYxyHqUxVsSRZPaCqQQ+qKJGYS6lDEV979/Xht7/99R+/Hr9aEgCImN//c/KHV7txzNKX1bbmKdra4KbRjdbbumgsh7n19eEfP/787rG9dNYlWgGA+dd4txtLHYasFEmnzX43qDLcQ4WgL4/3h6W3+f6Pu8PyouFf8rjzI7mmJJrqOA7jOOSScjr32wgPJ7z3ef/H+4e5u/XWzF78LtHnQM6P7qar6+tdLSVnVW6LufBwRF/nw/273+5fSrofnq/5pprdG1JOJAD308ggHAbv63I87h8/j4H/B3D9sapwrbYGAAAAAElFTkSuQmCC", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "T-shirt/top\n" - ] - } - ], - "source": [ - "from IPython import display\n", - "from PIL import Image\n", - "import numpy as np\n", - "\n", - "class_names = [\n", - " \"T-shirt/top\",\n", - " \"Trouser\",\n", - " \"Pullover\",\n", - " \"Dress\",\n", - " \"Coat\",\n", - 
" \"Sandal\",\n", - " \"Shirt\",\n", - " \"Sneaker\",\n", - " \"Bag\",\n", - " \"Ankle boot\",\n", - "]\n", - "\n", - "\n", - "for arr in test_images[:5]:\n", - "\n", - " res = p.predict(\n", - " data={\n", - " \"conv2d_input\": arr.reshape(1, 28, 28, 1),\n", - " }\n", - " )\n", - " idx = np.argmax(res[\"dense_1\"][0])\n", - " display.display(Image.fromarray(arr.reshape(28, 28), mode=\"L\").resize((100, 100)))\n", - " print(class_names[idx])" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "在测试完成之后,删除相关的服务,释放资源。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.3" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/docs/source/tutorial/train.rst b/docs/source/tutorial/train.rst deleted file mode 100644 index 2a70cbe..0000000 --- a/docs/source/tutorial/train.rst +++ /dev/null @@ -1,11 +0,0 @@ -=========================================== -模型开发 -=========================================== - -.. 
toctree:: - :maxdepth: 1 - - pretrained-model/pretrained-model - pytorch_ddp/pytorch_ddp - tensorboard/tensorboard - checkpoint/checkpoint diff --git a/docs/source/tutorial/xgboost_breast_cancer/xgboost_breast_cancer.ipynb b/docs/source/tutorial/xgboost_breast_cancer/xgboost_breast_cancer.ipynb deleted file mode 100644 index adc324a..0000000 --- a/docs/source/tutorial/xgboost_breast_cancer/xgboost_breast_cancer.ipynb +++ /dev/null @@ -1,529 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "# 使用 PAI Python SDK 训练和部署 XGBoost 模型\n", - "\n", - "\n", - "[XGBoost](https://xgboost.readthedocs.io/) 是基于决策树的梯度提升算法([Gradient Boosting](https://en.wikipedia.org/wiki/Gradient_boosting))的高效工程实现,是一个流行的机器学习库,它能够处理大的数据集合,并且做了许多训练性能优化工作。\n", - "\n", - "在这个教程示例中,我们将使用PAI Python SDK,在PAI上完成XGBoost模型的训练,然后将输出的模型部署为在线推理服务,并进行调用测试。" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "## Step1: 准备工作\n", - "\n", - "我们需要首先安装 PAI Python SDK 以运行本示例。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "skip-execution" - ] - }, - "outputs": [], - "source": [ - "\n", - "!python -m pip install --upgrade alipai" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "SDK 需要配置访问阿里云服务需要的 AccessKey,以及当前使用的工作空间和OSS Bucket。在 PAI SDK 安装之后,通过在 **命令行终端** 中执行以下命令,按照引导配置密钥,工作空间等信息。\n", - "\n", - "\n", - "```shell\n", - "\n", - "# 以下命令,请在 命令行终端 中执行.\n", - "\n", - "python -m pai.toolkit.config\n", - "\n", - "```\n", - "\n", - "我们可以通过以下代码验证当前的配置。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "# 验证安装\n", - "import pai\n", - "from pai.session import get_default_session\n", - "\n", - "print(pai.__version__)\n", - "\n", 
- "sess = get_default_session()\n", - "\n", - "assert sess.workspace_name is not None\n", - "print(sess.workspace_name)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step2: 准备数据集\n", - "\n", - "我们将使用[Breast Cancer数据集](https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)),训练和测试XGBoost模型。准备数据集的步骤如下:\n", - "\n", - "1. 通过 `scikit-learn` 下载和拆分 Breast Cancer 数据集,使用 `csv` 格式保存到本地。\n", - "\n", - "2. 将本地数据集上传到OSS Bucket上,获得数据集的OSS URI,供云上执行的训练作业使用。" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "使用SKLearn下载和拆分数据集。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "import sys\n", - "\n", - "# 安装 sklearn, 用于数据集下载和切分\n", - "!{sys.executable} -m pip install --quiet scikit-learn\n", - "\n", - "# 创建数据集目录\n", - "!mkdir -p ./train_data\n", - "!mkdir -p ./test_data\n", - "\n", - "from sklearn import datasets\n", - "from sklearn.model_selection import train_test_split\n", - "\n", - "df = datasets.load_breast_cancer(as_frame=True)\n", - "\n", - "train, test = train_test_split(df.frame, test_size=0.3)\n", - "\n", - "train_data_local = \"./train_data/train.csv\"\n", - "test_data_local = \"./test_data/train.csv\"\n", - "\n", - "train.to_csv(train_data_local, index=False)\n", - "test.to_csv(test_data_local, index=False)\n", - "\n", - "print(f\"train data local path: {train_data_local}\")\n", - "print(f\"test data local path: {test_data_local}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "上传数据集到OSS Bucket。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# 上传数据集到OSS Bucket\n", - "from pai.common.oss_utils import upload\n", - "\n", - "\n", - "# 上传训练数据到OSS\n", - "train_data = upload(\n", - " 
train_data_local,\n", - " \"pai/xgboost-example/train_data/\",\n", - " sess.oss_bucket,\n", - ")\n", - "\n", - "\n", - "test_data = upload(\n", - " test_data_local,\n", - " \"pai/xgboost-example/test_data/\",\n", - " sess.oss_bucket,\n", - ")\n", - "\n", - "print(f\"train data: {train_data}\")\n", - "print(f\"test data: {test_data}\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step3: 提交训练作业\n", - "\n", - "通过PAI Python SDK提供`Estimator`,用户可以将训练脚本,提交到PAI创建一个训练作业,获得输出模型,主要流程包括:\n", - "\n", - "1. 用户编写训练作业脚本\n", - "\n", - "训练脚本负责模型代码的编写,它需要遵循PAI训练作业的规则获取作业超参,读取输入数据,并且将需要保存模型到指定的输出目录。\n", - "\n", - "2. 构建`Estimator`对象\n", - "\n", - "通过`Estimator` API,用户配置训练作业使用的脚本,镜像,超参,以及机器实例类型等信息。\n", - "本地的脚本会有Estimator上传到OSS Bucket,然后加载到训练作业内。\n", - "\n", - "3. 调用`Estimator.fit`API提交作业\n", - "\n", - "通过`.fit`提交一个训练作业,默认`.fit`方法会等到作业停止之后,才会退出,作业结束后,用户可以通过`estimator.model_data()`获得输出模型OSS URI路径。\n", - "\n", - "更加完整的介绍请参考 [文档: 提交训练作业](https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/latest/user-guide/estimator.html)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "我们通过XGboost提供的SKlearn API,构建了一个XGBoost的训练脚本:\n", - "\n", - "- 训练作业默认接收两个输入Channel: train 和 test,训练脚本会从 `/ml/input/data/{channel_name}` 中读取训练数据。\n", - "\n", - "- 训练结束之后,训练脚本需要将模型写出到到 `/ml/output/model` 目录下。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "!mkdir -p xgb_src/" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%writefile xgb_src/train.py\n", - "\n", - "\n", - "import argparse\n", - "import logging\n", - "import os\n", - "\n", - "import pandas as pd\n", - "from xgboost import XGBClassifier\n", - "\n", - "logging.basicConfig(format=\"%(levelname)s:%(message)s\", level=logging.INFO)\n", - "\n", - "TRAINING_BASE_DIR = \"/ml/\"\n", - 
"TRAINING_OUTPUT_MODEL_DIR = os.path.join(TRAINING_BASE_DIR, \"output/model/\")\n", - "\n", - "\n", - "def load_dataset(channel_name):\n", - " path = os.path.join(TRAINING_BASE_DIR, \"input/data\", channel_name)\n", - " if not os.path.exists(path):\n", - " return None, None\n", - "\n", - " # use first file in the channel dir.\n", - " file_name = next(\n", - " iter([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]),\n", - " None,\n", - " )\n", - " if not file_name:\n", - " logging.warning(f\"Not found input file in channel path: {path}\")\n", - " return None, None\n", - "\n", - " file_path = os.path.join(path, file_name)\n", - " df = pd.read_csv(\n", - " filepath_or_buffer=file_path,\n", - " sep=\",\",\n", - " )\n", - "\n", - " train_y = df[\"target\"]\n", - " train_x = df.drop([\"target\"], axis=1)\n", - " return train_x, train_y\n", - "\n", - "\n", - "def main():\n", - " parser = argparse.ArgumentParser(description=\"XGBoost train arguments\")\n", - " # 用户指定的任务参数\n", - " parser.add_argument(\n", - " \"--n_estimators\", type=int, default=500, help=\"The number of base model.\"\n", - " )\n", - " parser.add_argument(\n", - " \"--objective\", type=str, help=\"Objective function used by XGBoost\"\n", - " )\n", - "\n", - " parser.add_argument(\n", - " \"--max_depth\", type=int, default=3, help=\"The maximum depth of the tree.\"\n", - " )\n", - "\n", - " parser.add_argument(\n", - " \"--eta\",\n", - " type=float,\n", - " default=0.2,\n", - " help=\"Step size shrinkage used in update to prevents overfitting.\",\n", - " )\n", - " parser.add_argument(\n", - " \"--eval_metric\",\n", - " type=str,\n", - " default=None,\n", - " help=\"Evaluation metrics for validation data\"\n", - " )\n", - "\n", - " args, _ = parser.parse_known_args()\n", - "\n", - " # 加载数据集\n", - " train_x, train_y = load_dataset(\"train\")\n", - " print(\"Train dataset: train_shape={}\".format(train_x.shape))\n", - " test_x, test_y = load_dataset(\"test\")\n", - " if test_x is None or 
test_y is None:\n", - " print(\"Test dataset not found\")\n", - " eval_set = [(train_x, train_y)]\n", - " else:\n", - " eval_set = [(train_x, train_y), (test_x, test_y)]\n", - "\n", - " clf = XGBClassifier(\n", - " max_depth=args.max_depth,\n", - " eta=args.eta,\n", - " n_estimators=args.n_estimators,\n", - " objective=args.objective,\n", - " )\n", - " clf.fit(train_x, train_y, eval_set=eval_set, eval_metric=args.eval_metric)\n", - "\n", - " model_path = os.environ.get(\"PAI_OUTPUT_MODEL\")\n", - " os.makedirs(model_path, exist_ok=True)\n", - " clf.save_model(os.path.join(model_path, \"model.json\"))\n", - " print(f\"Save model succeed: model_path={model_path}/model.json\")\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " main()\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 使用Estimator提交训练作业\n", - "\n", - "通过 Estimator, 我们将以上构建的训练脚本 (xgb_src/train.py) 上传到 OSS上,通过`fit` 提交一个在云端执行XGBoost训练作业。 fit API接收的inputs分别是之前上传的训练和测试的数据,会被挂载到作业容器中(分别挂载到 `/ml/input/data/{channel_name}/`),供训练脚本读取输入数据。\n", - "\n", - "提交之后,SDK 会打印作业的详情URL,并且打印作业日志,直到作业退出(成功,失败,或是停止)。用户可以点击作业URL查看任务详情,执行日志,模型的Metric,机器资源使用率等信息。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.estimator import Estimator\n", - "from pai.image import retrieve\n", - "\n", - "\n", - "# 获取PAI提供的XGBoost训练镜像\n", - "image_uri = retrieve(\"xgboost\", framework_version=\"latest\").image_uri\n", - "print(image_uri)\n", - "\n", - "# 构建一个Estimator实例\n", - "est = Estimator(\n", - " # 作业启动脚本\n", - " command=\"python train.py $PAI_USER_ARGS\",\n", - " # 作业脚本的本地文件夹路径,会被打包上传到OSS\n", - " source_dir=\"./xgb_src/\",\n", - " image_uri=image_uri,\n", - " # 作业超参: 会通过Command arguments的方式传递给到作业脚本\n", - " hyperparameters={\n", - " \"n_estimator\": 100,\n", - " \"criterion\": \"gini\",\n", - " \"max_depth\": 5,\n", - " \"eval_metric\": \"auc\",\n", - " },\n", - " # 作业使用的机器实例\n", - " 
instance_type=\"ecs.c6.large\",\n", - ")\n", - "\n", - "# 使用上传到OSS的训练数据作为作业的数据\n", - "est.fit(\n", - " inputs={\n", - " \"train\": train_data, # train_data 将被挂载到`/ml/input/data/train`目录\n", - " \"test\": test_data, # test_data 将被挂载到`/ml/input/data/test`目录\n", - " },\n", - ")\n", - "print(est.model_data())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step4: 部署模型\n", - "\n", - "以上训练获得模型,我们将使用[预置XGBoost Processor](https://help.aliyun.com/document_detail/470490.html)部署为一个在线服务。主要流程包括:\n", - "\n", - "1. 通过构建一个InferenceSpec\n", - "\n", - "InferenceSpec负责描述模型如何部署为一个在线服务,例如模型使用镜像部署,还是使用processor部署等。\n", - "\n", - "2. 构建Model对象\n", - "\n", - "Model对象可以直接部署服务,也可以通过`.register`注册到PAI的模型仓库。\n", - "\n", - "3. 使用`Model.deploy`部署在线服务。\n", - "\n", - "通过指定服务名称,机器实例类型,部署一个新的在线推理服务。\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pai.model import Model, InferenceSpec\n", - "from pai.predictor import Predictor\n", - "\n", - "from pai.common.utils import random_str\n", - "import os\n", - "\n", - "\n", - "# 使用模型文件地址以及 InferenceSpec 构建一个Model对象\n", - "m = Model(\n", - " # `est.model_data()`返回的是模型文件所在的OSS目录的URI,XGBoost processor需要传递具体的模型文件。\n", - " model_data=os.path.join(est.model_data(), \"model.json\"),\n", - " inference_spec=InferenceSpec(processor=\"xgboost\"),\n", - ")\n", - "\n", - "\n", - "# 部署服务\n", - "p: Predictor = m.deploy(\n", - " service_name=\"example_xgb_{}\".format(random_str(6)),\n", - " instance_type=\"ecs.c6.xlarge\",\n", - " # 启动的服务实例个数。\n", - " instance_count=1,\n", - " # 按照 每一个服务的资源使用量,而不是机器类型创建服务。\n", - " # instance_resource_config=ResourceConfig(\n", - " # cpu=2,\n", - " # memory=4000,\n", - " # )\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step5: 测试在线服务\n", - "\n", - "`Model.deploy`方法返回一个 `Predictor` 对象,`Predictor.predict`方法支持向创建的推理服务发送推理请求,拿到预测结果。" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(p.service_name)\n", - "\n", - "test_x = test.drop([\"target\"], axis=1)\n", - "\n", - "p.predict(test_x.to_numpy())" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "在测试结束后,删除服务。" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p.delete_service()" - ] - } - ], - "metadata": { - "execution": { - "timeout": 1800 - }, - "kernelspec": { - "display_name": "pai-dev-py36", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - }, - "vscode": { - "interpreter": { - "hash": "63703143536f433679c5464335316251eaa13807b3fcc3854dae32f2699871d6" - } - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/docs/source/user-guide/pretrained-model.rst b/docs/source/user-guide/pretrained-model.rst index b32c4b7..cf3f96c 100644 --- a/docs/source/user-guide/pretrained-model.rst +++ b/docs/source/user-guide/pretrained-model.rst @@ -87,113 +87,46 @@ PAI公共仓库中的部分模型,也提供了微调训练算法,支持用 .. code-block:: python - from pai.model import RegisteredModel - from pai.estimator import AlgorithmEstimator + from pai.model import RegisteredModel, ModelTrainingRecipe # 获取PAI提供的Bert模型 m = RegisteredModel("bert-base-uncased", model_provider="pai") - # 获取模型的微调训练算法 - est: AlgorithmEstimator = m.get_estimator() - - # 查看算法的超参数定义描述、输入定义描述,以及输出定义描述。 - print(est.hyperparameter_definitions) - # [{'DefaultValue': '1', - # 'Type': 'Int', - # 'Description': 'Number of epochs to train the model. 
Each epoch is one complete iteration over the entire training dataset.', - # 'Required': True, - # 'Name': 'max_epochs'}, - # {'DefaultValue': '16', - # 'Type': 'Int', - # 'Description': 'Number of samples that will be propagated through the model. A higher value might consume more memory.', - # 'Required': False, - # 'Name': 'batch_size'}, - # {'DefaultValue': '0.00001', - # 'Type': 'Float', - # 'Description': 'The initial learning rate to be used for training. A higher value usually implies more aggression in gradient updates.', - # 'Required': False, - # 'Name': 'learning_rate'}, - # {'DefaultValue': '2000', - # 'Type': 'Int', - # 'Description': 'Number of updates steps before two checkpoint.', - # 'Required': False, - # 'Name': 'save_steps'} - # ] - print(est.input_channel_definitions) - # [{'Description': 'Input channel for pretrained model to be fine-tuned on.', - # 'Required': True, - # 'SupportedChannelTypes': ['oss'], - # 'Properties': {'ResourceUse': 'Base', 'ResourceType': 'Model'}, - # 'Name': 'model'}, - # {'Description': 'Input channel for training dataset.', - # 'Required': True, - # 'SupportedChannelTypes': ['oss'], - # 'Properties': {'ResourceUse': 'Train', 'ResourceType': 'Dataset'}, - # 'Name': 'train'}, - # {'Description': 'Input channel for validation dataset.', - # 'Required': False, - # 'SupportedChannelTypes': ['oss'], - # 'Properties': {'ResourceUse': 'Validation', 'ResourceType': 'Dataset'}, - # 'Name': 'validation'}] - - - # 查看算法的默认输入,包含了预训练模型,训练数据,验证数据等 - training_inputs = m.get_estimator_inputs() - print(training_inputs) - # { - # 'model': 'oss://pai-quickstart-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/huggingface/models/bert-base-uncased/main/', - # 'train': 'oss://pai-quickstart-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/huggingface/datasets/sst2/main/train.json', - # 'validation': 'oss://pai-quickstart-cn-hangzhou.oss-cn-hangzhou.aliyuncs.com/huggingface/datasets/sst2/main/validation.json' - # } - - # 使用默认输入进行微调训练 - 
est.fit(inputs=training_inputs) - - # 查看训练输出的模型,默认模型存储在OSS URI上 - print(est.model_data()) - - -以上的训练任务中,我们使用了PAI提供的公共数据集,对模型进行微调训练。当用户需要使用自己的数据集进行微调训练时,需要先将数据准备到OSS,或是NAS上,然后将数据的OSS或是NAS路径,作为训练任务的输入。 - - -使用用户训练数据集提交训练任务: - -.. code-block:: python - - from pai.estimator import AlgorithmEstimator - - # 获取模型的微调训练算法 - est: AlgorithmEstimator = m.get_estimator() - # 配置修改提交的训练算法超参,具体的超参用途可以查看 est.hyperparameter_definitions 中的描述. - est.hyperparameters = { - 'max_epochs': 1, - 'batch_size': 8, - 'learning_rate': 2e-05, - 'save_steps': 2000 - } - - # 默认的训练输入 - default_training_inputs = m.get_estimator_inputs() - # 使用用户的数据集进行微调训练 - training_inputs = { - # 使用PAI提供预训练模型作为基础模型输入 - "model": default_training_inputs["model"], - # 使用用户的训练和测试数据集 - "train": "oss:///my-dataset/train.json", - "validation": "oss:///my-dataset/validation.json" - } - - est.fit(inputs=training_inputs) - -用户可以通过模型卡片上的文档,查看模型的微调训练数据格式。同时也可以参考相应的模型微调训练的默认输入数据格式,进行数据的准备。 - -下载PAI数据集到本地目录: - -.. code-block:: python + training_recipe = m.training_recipe() + + training_recipe = ModelTrainingRecipe( + model_name = "bert-base-uncased", + model_provider = "pai", + instance_type = "ecs.c6.xlarge", + # 训练任务的超参数 + hyperparameters={ + "max_epochs": 1, + "learning_rate": 0.00001, + "batch_size": 16, + "save_steps": 2000, + }, + ) - from pai.common.oss_util import download + # 查看模型微调算法输入定义 + print(training_recipe.input_channels) + # 查看模型微调算法超参数定义 + print(training_recipe.hyperparameter_definitions) + # 查看默认训练输入数据 + print(training_recipe.default_inputs) + + # 提交微调训练作业 + job = training_recipe.train( + job_name="train_recipe_example", + # 配置使用用户在OSS Bucket上的数据作为训练数据 + # inputs={ + # "train": "oss:///" + # } + ) + # 获取微调后模型路径 + print(training_recipe.model_data()) - # 默认的训练输入 - default_training_inputs = m.get_estimator_inputs() + # 使用PAI提供的推理服务配置部署模型 + predictor = training_recipe.deploy( + service_name="bert_example", + ) - # 下载PAI提供的公共训练数据到本地 - download(default_training_inputs["train"], "./train/") +用户可以通过PAI 
ModelGallery提供的模型卡片上的文档,查看具体模型模型的微调训练数据格式。 diff --git a/docs/source/user-guide/processing-job.rst b/docs/source/user-guide/processing-job.rst new file mode 100644 index 0000000..1c61024 --- /dev/null +++ b/docs/source/user-guide/processing-job.rst @@ -0,0 +1,214 @@ +===================== +通用作业(Experimental) +===================== + +SDK提供了HighLevel的通用任务API: :class:`~pai.processor.Processor` 支持用户提交通用作业到PAI,使用示例如下。 + +.. code-block:: python + + from pai.processor import Processor + + # 通过 Processor 配置通用作业的信息 + processor = Processor( + command="" + source_dir="" + image_uri="" + instance_type="", + parameters={ + "interval": 500, + "max_retry": 5, + }, + ) + + # 指定作业的输入和输出,并提交作业 + processor.run( + inputs={ + "inputs": "oss:///path/to/input/", + }, + outputs={ + "outputs": "oss:///path/to/output/", + }, + ) + +用户可以通过提交通用作业来完成自定义训练、数据处理等一系列任务,本文档将介绍如何通过 Processor 来提交通用作业。 + +.. note:: 通用作业为实验性功能,在未来版本中可能会变更或者移除。 + +准备作业脚本 +***************** + +通过 :class:`~pai.processor.Processor` 的 ``source_dir`` 参数, +开发者可以配置需要上传执行的代码目录。在通用作业提交之后, +相应的代码会被上传到用户OSS,并在作业启动之前被下载到作业的执行环境中。 + +用户代码目录示例: + +.. code-block:: shell + + |-- code_dir # 用户指定上传的代码目录 + `-- main.py # 作业脚本,用户可以通过 python main.py 的命令拉起脚本 + `-- utils.py + + +通过 ``Processor`` 设置使用的代码目录,可以是绝对路径或是相对路径。 + +.. code-block:: python + + processor = Processor( + command="python main.py", + # 可以通过相对路径或是绝对路径的方式指定代码. + source_dir="code_dir/", + # source_dir="/home/foo/code_dir/", + ) + +作业代码会被下载到作业执行环境的 ``/ml/usercode`` 目录下,作业启动前会切换 working directory 至 ``/ml/usercode`` 目录。 + +.. code-block:: shell + + |-- /ml/usercode/ # 作业代码所在目录 + `-- main.py + `-- utils.py + +用户也可以通过传递一个OSS Bucket路径,作为作业代码路径。 + +.. 
code-block:: python + + from pai.common.oss_utils import upload + + # 上传代码到OSS,返回一个OSS URI + code_uri = upload( + local_path="./code_dir/", + oss_path="path/for/code/" + ) + # code_uri: oss:///path/for/code/ + + processor = Processor( + command="python main.py", + # 使用OSS上的作业代码 + source_dir=code_uri, + ) + + +配置作业镜像 +***************** + +在提交执行作业时,用户需要配置作业运行使用的镜像 ( :class:`~pai.processor.Processor` 的 ``image_uri`` +参数),镜像内包含作业执行所需的依赖,例如Python、CUDA、机器学习框架、以及依赖的第三方库等,从而支持代码运行。 + +用户可以配置使用阿里云镜像仓库内的镜像,也可以使用PAI提供的公共镜像(推荐)。对于常见的机器学习框架,PAI提供了公共镜像供用户使用,用户可以通过以下的代码获取镜像信息: + +.. note:: + + 用户可以通过PAI `公共镜像文档 `_ 查看PAI提供的镜像内安装的Python三方库信息。 + +.. note:: + + 企业版容器镜像服务ACR默认需要通过用户的VPC访问镜像仓库,具体请参考文档: `配置专有网络的访问控制 `_。 + 作业的机器实例位于云产品PAI的VPC环境内,需要通过配置 :class:`~pai.processor.Processor` 的 ``user_vpc_config`` 参数,将作业实例与用户VPC网络进行连接,作业才能通过用户VPC访问到企业版镜像仓库,拉取镜像。 + +.. code-block:: python + + from pai.image import retrieve, list_images + + # 获取PAI提供的最新的PyTorch的GPU镜像 + # 通过参数 framework_version="latest",retrieve 方法会返回最新的 PyTorch 镜像 + print(retrieve(framework_name="TensorFlow", framework_version="latest", + accelerator_type="GPU")) + + # 获取PAI提供的所有PyTorch镜像 + for image_info in list_images(framework_name="PyTorch"): + print(image_info) + + +安装代码依赖 +************************************************ + +当代码有额外的Python包依赖,可以通过在代码目录下编写 `requirements.txt `_ ,相应的三方库依赖会在用户脚本执行前被安装到作业环境中。 + +配置使用 ``requirements.txt`` 的作业代码目录示例如下: + +.. code-block:: shell + + |-- code_dir # 作业配置使用的脚本目录 + |-- requirements.txt # 作业的requirements信息 + `-- main.py + `-- utils.py + + +执行作业 +***************** + +用户通过构建 :class:`~pai.processor.Processor` 指定作业的脚本目录、启动脚本、参数、机器资源等, +然后通过 :meth:`~pai.processor.Processor.run` 方法提交作业。在提交作业之后,SDK会打印作业的控制台URL, +并持续打印作业的输出日志信息,直到作业结束退出(作业状态为成功,失败,或是被停止)。 + +用户可以通过作业URL,去控制台查看作业执行详情、日志、机器的资源使用情况、以及作业的Metrics等信息。 +在作业执行完成之后退出,可以通过 :meth:`~pai.processor.Processor.get_outputs_data` 方法获得提交作业的产出的模型的OSS路径。 + +示例代码如下: + +.. 
code-block:: python + + from pai.processor import Processor + from pai.image import retrieve + + # 获取PAI支持的最新 PyTorch 镜像 + image_uri = retrieve("PyTorch", accelerator_type="GPU").image_uri + + processor = Processor( + # 作业的启动命令 + command="python main.py", + # 作业脚本所在目录 + source_dir="./code_dir/", + # 作业使用的镜像 + image_uri=image_uri, + # 作业使用的机器类型, 支持的机器类型见文档 https://help.aliyun.com/document_detail/171758.html#section-55y-4tq-84y + instance_type="ecs.c6.xlarge", + # 作业的参数 + parameters={ + "interval": 500, + "max_retry": 5, + }, + # 作业名称前缀,用户提交的作业使用的Name为 `{base_job_name}_{submitted-datetime}` + base_job_name="example_processing_job", + ) + + # 提交作业,同时打印作业的Web详情页URL。 + # run 方法默认等待到作业终止(成功,失败,或是被停止)。 + processor.run( + inputs={ + "inputs": "oss:///path/to/input/", + }, + outputs={ + "outputs": "oss:///path/to/output/", + }, + ) + + +下载作业输出 +***************** + +作业执行完成之后,用户可以通过 :meth:`pai.processor.Processor.get_outputs_data` +获得提交作业输出的OSS路径。用户可以通过SDK提供的 +``download`` 方法下载模型到本地,也可以使用 ``ossutil`` 命令行工具下载模型。 + + +使用 ``pai.common.oss_utils.download`` 方法下载模型到本地: + +.. code-block:: python + + from pai.common.oss_utils import download + + outputs = processor.get_outputs_data() + + # 下载模型到本地 + download(oss_path=outputs["outputs"], local_path="./outputs/") + + +通过 ``ossutil`` 命令行工具下载模型到本地。 + +.. 
code-block:: shell + + ossutil cp -r ./outputs/ + +对于 ``ossutil`` 命令行工具的使用,可以参考 `ossutil工具使用文档 `_ 。 diff --git a/noxfile.py b/noxfile.py index 244846e..31974b4 100644 --- a/noxfile.py +++ b/noxfile.py @@ -15,7 +15,6 @@ "requirements/lint-requirements.txt", ) - DOC_REQUIREMENTS = os.path.join( _pkg_root, "requirements/doc-requirements.txt", @@ -23,11 +22,11 @@ PASSING_ENVIRONMENTS = { "PAI_TEST_CONFIG": "test.ini", + "PYTHONWARNINGS": "ignore", } - -UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] -INTEGRATION_TEST_PYTHON_VERSIONS = ["3.7"] +UNIT_TEST_PYTHON_VERSIONS = ["3.8", "3.9", "3.10"] +INTEGRATION_TEST_PYTHON_VERSIONS = ["3.8"] TEST_VENV_BACKEND = os.environ.get("PAI_TEST_VENV_BACKEND", "conda") @@ -39,7 +38,7 @@ def install_test_dependencies(session: Session): session.install("-r", TEST_REQUIREMENTS) -@nox.session(venv_backend=TEST_VENV_BACKEND, python=INTEGRATION_TEST_PYTHON_VERSIONS) +@nox.session(venv_backend=TEST_VENV_BACKEND) def integration(session: Session): """Run integration test.""" install_test_dependencies(session=session) @@ -50,14 +49,20 @@ def integration(session: Session): if os.environ.get(key, value) is not None } + # set worker to 2 * cpu_count (physical cores) if not specified + if "-n" not in session.posargs and "--numprocesses" not in session.posargs: + pos_args = session.posargs + ["-n", str(os.cpu_count() * 2)] + else: + pos_args = session.posargs session.run( "pytest", + "-vv", "--cov-config=.coveragerc", "--cov-append", "--cov-report=html", "--cov=pai", os.path.join("tests", "integration"), - *session.posargs, + *pos_args, env=env, ) session.run( @@ -72,8 +77,10 @@ def integration(session: Session): def unit(session: Session): """Run unit test.""" install_test_dependencies(session=session) + # run test cases session.run( "pytest", + "-vv", "--cov-config=.coveragerc", "--cov-append", "--cov-report=html", @@ -93,7 +100,7 @@ def unit(session: Session): def lint(session: Session): """Enforce code style with flake8.""" 
session.install("-r", LINT_REQUIREMENTS) - session.run("flake8", "--config", ".flake8") + session.run("flake8", "--exclude", "venv,.venv,env,.nox,build,dist", "--config", ".flake8") session.run("black", "--check", ".") session.run("typos", "--config", "typos.toml", "-w") @@ -154,6 +161,7 @@ def notebook(session: Session): session.run( "pytest", + "-vv", "--timeout", "3000", "--nbmake", diff --git a/pai/__init__.py b/pai/__init__.py index 71e8bc4..55b7955 100644 --- a/pai/__init__.py +++ b/pai/__init__.py @@ -12,18 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys - -if sys.version_info >= (3, 8): - # noinspection PyCompatibility - from importlib.metadata import PackageNotFoundError, version -else: - from importlib_metadata import PackageNotFoundError, version - -PACKAGE_NAME = "alipai" - -try: - __version__ = version(PACKAGE_NAME) -except PackageNotFoundError: - # package is not installed - __version__ = None +from .version import VERSION as __version__ diff --git a/pai/api/api_container.py b/pai/api/api_container.py index c68894a..924e056 100644 --- a/pai/api/api_container.py +++ b/pai/api/api_container.py @@ -11,18 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +from typing import Optional, Union from alibabacloud_credentials.client import Client as CredentialClient from alibabacloud_sts20150401.client import Client as StsClient +from ..common.consts import DEFAULT_NETWORK_TYPE, PAI_VPC_ENDPOINT, Network +from ..common.utils import is_domain_connectable from .algorithm import AlgorithmAPI from .base import PAIRestResourceTypes, ServiceName, WorkspaceScopedResourceAPI from .client_factory import ClientFactory from .code_source import CodeSourceAPI from .dataset import DatasetAPI +from .experiment import ExperimentAPI from .image import ImageAPI from .job import JobAPI +from .lineage import LineageAPI from .model import ModelAPI from .pipeline import PipelineAPI from .pipeline_run import PipelineRunAPI @@ -44,6 +48,8 @@ PAIRestResourceTypes.Pipeline: PipelineAPI, PAIRestResourceTypes.PipelineRun: PipelineRunAPI, PAIRestResourceTypes.TensorBoard: TensorBoardAPI, + PAIRestResourceTypes.Experiment: ExperimentAPI, + PAIRestResourceTypes.Lineage: LineageAPI, } @@ -55,11 +61,32 @@ class ResourceAPIsContainerMixin(object): _region_id = None _workspace_id = None - def __init__(self, header=None, runtime=None): + def __init__( + self, header=None, runtime=None, network: Optional[Union[str, Network]] = None + ): + """Initialize ResourceAPIsContainerMixin. + + Args: + header: Header for API request. + runtime: Runtime for API request. + network: Network type used to connect to PAI services. 
+ """ self.header = header self.runtime = runtime self.api_container = dict() self.acs_client_container = dict() + if network: + self.network = ( + Network.from_string(network) if isinstance(network, str) else network + ) + elif DEFAULT_NETWORK_TYPE: + self.network = Network.from_string(DEFAULT_NETWORK_TYPE) + else: + self.network = ( + Network.VPC + if is_domain_connectable(PAI_VPC_ENDPOINT.format(self._region_id)) + else Network.PUBLIC + ) def _acs_credential_client(self): if self._credential_client: @@ -74,6 +101,7 @@ def _get_acs_client(self, service_name): service_name=service_name, credential_client=self._acs_credential_client(), region_id=self._region_id, + network=self.network, ) self.acs_client_container[service_name] = acs_client return acs_client @@ -182,3 +210,11 @@ def pipeline_api(self) -> PipelineAPI: @property def pipeline_run_api(self) -> PipelineRunAPI: return self.get_api_by_resource(PAIRestResourceTypes.PipelineRun) + + @property + def experiment_api(self) -> ExperimentAPI: + return self.get_api_by_resource(PAIRestResourceTypes.Experiment) + + @property + def lineage_api(self) -> LineageAPI: + return self.get_api_by_resource(PAIRestResourceTypes.Lineage) diff --git a/pai/api/base.py b/pai/api/base.py index f19a39d..64661f5 100644 --- a/pai/api/base.py +++ b/pai/api/base.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging from abc import ABCMeta from typing import Any, Dict, List, Optional, Union @@ -22,7 +21,9 @@ from six import with_metaclass from Tea.model import TeaModel -logger = logging.getLogger(__name__) +from ..common.logging import get_logger + +logger = get_logger(__name__) class ServiceName(object): @@ -34,6 +35,7 @@ class ServiceName(object): PAIFLOW = "paiflow" # Other services provided by Alibaba Cloud. 
STS = "sts" + PAI_DSW = "pai-dsw" class PAIRestResourceTypes(object): @@ -51,6 +53,8 @@ class PAIRestResourceTypes(object): Pipeline = "Pipeline" PipelineRun = "PipelineRun" TensorBoard = "TensorBoard" + Experiment = "Experiment" + Lineage = "Lineage" class ResourceAPI(with_metaclass(ABCMeta, object)): diff --git a/pai/api/client_factory.py b/pai/api/client_factory.py index 028efe5..0f0552a 100644 --- a/pai/api/client_factory.py +++ b/pai/api/client_factory.py @@ -14,21 +14,24 @@ from __future__ import absolute_import -import logging +from typing import Optional from alibabacloud_credentials.client import Client as CredentialClient from alibabacloud_sts20150401.client import Client as StsClient from alibabacloud_tea_openapi.models import Config +from ..common.consts import Network +from ..common.logging import get_logger from ..common.utils import http_user_agent from ..libs.alibabacloud_aiworkspace20210204.client import Client as WorkspaceClient from ..libs.alibabacloud_eas20210701.client import Client as EasClient from ..libs.alibabacloud_pai_dlc20201203.client import Client as DlcClient +from ..libs.alibabacloud_pai_dsw20220101.client import Client as DswClient from ..libs.alibabacloud_paiflow20210202.client import Client as FlowClient from ..libs.alibabacloud_paistudio20220112.client import Client as PaiClient from .base import ServiceName -_logger = logging.getLogger(__name__) +_logger = get_logger(__name__) DEFAULT_SERVICE_ENDPOINT_PATTERN = "{}.{}.aliyuncs.com" @@ -41,6 +44,7 @@ class ClientFactory(object): ServiceName.PAIFLOW: FlowClient, ServiceName.PAI_STUDIO: PaiClient, ServiceName.STS: StsClient, + ServiceName.PAI_DSW: DswClient, } @staticmethod @@ -53,16 +57,19 @@ def create_client( service_name, region_id: str, credential_client: CredentialClient, + network: Optional[Network] = None, **kwargs, ): """Create an API client which is responsible to interacted with the Alibaba Cloud service.""" + config = Config( region_id=region_id, 
credential=credential_client, endpoint=cls.get_endpoint( service_name=service_name, region_id=region_id, + network=network, ), signature_algorithm="v2", user_agent=http_user_agent(), @@ -72,9 +79,20 @@ def create_client( return client @classmethod - def get_endpoint(cls, service_name: str, region_id: str) -> str: + def get_endpoint( + cls, service_name: str, region_id: str, network: Optional[Network] = None + ) -> str: """Get the endpoint for the service client.""" if not region_id: raise ValueError("Please provide region_id to get the endpoint.") - return DEFAULT_SERVICE_ENDPOINT_PATTERN.format(service_name, region_id) + if network and network != Network.PUBLIC: + if service_name == "pai-eas": + # see endpoint list provided by PAI-EAS + # https://next.api.aliyun.com/product/eas + subdomain = f"pai-eas-manage-{network.value.lower()}" + else: + subdomain = f"{service_name}-{network.value.lower()}" + else: + subdomain = service_name + return DEFAULT_SERVICE_ENDPOINT_PATTERN.format(subdomain, region_id) diff --git a/pai/api/dataset.py b/pai/api/dataset.py index f693e3e..5cdfe06 100644 --- a/pai/api/dataset.py +++ b/pai/api/dataset.py @@ -46,6 +46,8 @@ def list( name: str = None, page_size: int = 20, page_number: int = 1, + order: str = "DESC", + **kwargs, ) -> PaginatedResult: """Returns Dataset in paging. @@ -54,6 +56,7 @@ def list( name: Name of the Dataset. page_number: Page number. page_size: Page size. + order: Return list order. Returns: int: A list of datasets match the conditions. @@ -64,6 +67,8 @@ def list( name=name, page_size=page_size, page_number=page_number, + order=order, + **kwargs, ) resp: ListDatasetsResponseBody = self._do_request( diff --git a/pai/api/entity_base.py b/pai/api/entity_base.py deleted file mode 100644 index 044ae9a..0000000 --- a/pai/api/entity_base.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2023 Alibaba, Inc. or its affiliates. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Any, Dict, Optional, Type - -from ..schema.base import BaseAPIResourceSchema -from ..session import Session, get_default_session - - -class EntityBaseMixin(object): - - _schema_cls: Type[BaseAPIResourceSchema] - resource_type: str - - def __init__(self, session: Optional[Session] = None, **kwargs) -> None: - super(EntityBaseMixin, self).__init__() - self._session = session or get_default_session() - - @property - def session(self) -> Session: - return self._session - - @classmethod - def from_api_object(cls, obj_dict: Dict[str, Any], session: Session = None): - """Construct an entity representing the API resource from response. - - Args: - session: Session for the instance. - obj_dict: Response in json - - Returns: - An entity representing the resource. - """ - session = session or get_default_session() - return cls._schema_cls(session=session).load(obj_dict) - - def to_api_object(self) -> Dict[str, Any]: - """Convert the current instance to a dictionary representing an API object. - - Returns: - dict: a dictionary representing the API object. 
- """ - return self._schema_cls().dump(self) - - def patch_from_api_object(self, api_obj: Dict[str, Any]): - if not api_obj: - raise ValueError("REST API object should not be empty.") - - return self._schema_cls(instance=self).load(api_obj) - - def __repr__(self): - return "{}:{}".format(type(self).__name__, self.id) - - def __str__(self) -> str: - return self.__repr__() - - @property - def resource_api(self): - return self._session.get_api_by_resource(self.resource_type) diff --git a/pai/api/experiment.py b/pai/api/experiment.py new file mode 100644 index 0000000..07341a3 --- /dev/null +++ b/pai/api/experiment.py @@ -0,0 +1,120 @@ +# Copyright 2023 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import List + +from ..libs.alibabacloud_aiworkspace20210204.models import ( + CreateExperimentRequest, + CreateExperimentResponseBody, + Experiment, + LabelInfo, + ListExperimentRequest, + ListExperimentResponseBody, + SetExperimentLabelsRequest, + UpdateExperimentRequest, +) +from .base import PaginatedResult, ServiceName, WorkspaceScopedResourceAPI + + +class ExperimentAPI(WorkspaceScopedResourceAPI): + BACKEND_SERVICE_NAME = ServiceName.PAI_WORKSPACE + + _get_method = "get_experiment_with_options" + _create_method = "create_experiment_with_options" + _list_method = "list_experiment_with_options" + _update_method = "update_experiment_with_options" + _delete_method = "delete_experiment_with_options" + _set_labels_method = "set_experiment_labels_with_options" + _delete_label_method = "delete_experiment_label_with_options" + + def get(self, experiment_id: str): + resp: Experiment = self._do_request( + method_=self._get_method, experiment_id=experiment_id + ) + return resp.to_map() + + def create( + self, + name, + artifact_uri, + workspace_id, + **kwargs, + ): + request = CreateExperimentRequest( + name=name, + artifact_uri=artifact_uri, + workspace_id=workspace_id, + **kwargs, + ) + resp: CreateExperimentResponseBody = self._do_request( + method_=self._create_method, request=request + ) + return resp.experiment_id + + def list( + self, + name: str = None, + page_size: int = 50, + page_number: int = 1, + order: str = "DESC", + **kwargs, + ) -> PaginatedResult: + workspace_id = kwargs.pop("workspace_id", None) + request = ListExperimentRequest( + name=name, + page_size=page_size, + page_number=page_number, + order=order, + workspace_id=workspace_id, + **kwargs, + ) + resp: ListExperimentResponseBody = self._do_request( + method_=self._list_method, request=request + ) + return self.make_paginated_result(resp) + + def update( + self, + experiment_id: str, + name: str, + **kwargs, + ): + request = UpdateExperimentRequest( + name=name, + **kwargs, + ) + 
self._do_request( + method_=self._update_method, experiment_id=experiment_id, request=request + ) + return + + def delete(self, experiment_id: str): + self._do_request(method_=self._delete_method, experiment_id=experiment_id) + return + + def set_experiment_labels(self, experiment_id: str, labels: List[LabelInfo]): + request = SetExperimentLabelsRequest( + labels=labels, + ) + self._do_request( + method_=self._set_labels_method, + experiment_id=experiment_id, + request=request, + ) + return + + def delete_experiment_label(self, experiment_id: str, key: str): + self._do_request( + method_=self._delete_label_method, experiment_id=experiment_id, key=key + ) + return diff --git a/pai/api/job.py b/pai/api/job.py index 3212f7a..143f0f9 100644 --- a/pai/api/job.py +++ b/pai/api/job.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging from datetime import datetime from typing import Any, Dict, List +from ..common.logging import get_logger from ..libs.alibabacloud_pai_dlc20201203.models import ( GetJobEventsRequest, GetJobEventsResponseBody, @@ -27,7 +27,7 @@ ) from .base import PaginatedResult, ServiceName, WorkspaceScopedResourceAPI -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class JobAPI(WorkspaceScopedResourceAPI): diff --git a/pai/api/lineage.py b/pai/api/lineage.py new file mode 100644 index 0000000..b51073b --- /dev/null +++ b/pai/api/lineage.py @@ -0,0 +1,76 @@ +# Copyright 2023 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass +from typing import Dict, List, Optional + +from ..common.logging import get_logger +from ..libs.alibabacloud_aiworkspace20210204.models import ( + LineageEntity, + RegisterLineageRequest, +) +from .base import ServiceName, WorkspaceScopedResourceAPI + +logger = get_logger(__name__) + + +@dataclass +class _LineageEntity: + Attributes: Dict[str, str] = None + EntityType: Optional[str] = None + Name: Optional[str] = None + QualifiedName: Optional[str] = None + + +class LineageAPI(WorkspaceScopedResourceAPI): + BACKEND_SERVICE_NAME = ServiceName.PAI_WORKSPACE + + _register_lineage = "register_lineage_with_options" + + def log_lineage( + self, + inputs: List[_LineageEntity], + outputs: List[_LineageEntity], + job_id: str, + workspace_id: str, + ): + input_entities = [] + output_entities = [] + for input in inputs: + input_entities.append( + LineageEntity( + attributes=input.Attributes, + entity_type=input.EntityType, + name=input.Name, + qualified_name=input.QualifiedName, + ) + ) + for output in outputs: + output_entities.append( + LineageEntity( + attributes=output.Attributes, + entity_type=output.EntityType, + name=output.Name, + qualified_name=output.QualifiedName, + ) + ) + request = RegisterLineageRequest( + register_task_as_entity=True, + input_entities=input_entities, + output_entities=output_entities, + qualified_name="pai_dlcjob-task." 
+ job_id, + name=job_id, + attributes={"WorkspaceId": workspace_id}, + ) + response = self._do_request(method_=self._register_lineage, request=request) + logger.debug(response) diff --git a/pai/api/model.py b/pai/api/model.py index 2f76b7e..65570bd 100644 --- a/pai/api/model.py +++ b/pai/api/model.py @@ -77,6 +77,7 @@ def create( def list( self, + collections: str = None, domain: str = None, label: str = None, label_string: str = None, @@ -93,6 +94,7 @@ def list( workspace_id: str = None, ) -> PaginatedResult: request = ListModelsRequest( + collections=collections, domain=domain, label=label, label_string=label_string, @@ -125,6 +127,7 @@ def create_version( self, model_id: str, approval_status: str = None, + evaluation_spec: Dict[str, Any] = None, format_type: str = None, framework_type: str = None, inference_spec: Dict[str, Any] = None, @@ -143,6 +146,7 @@ def create_version( request = CreateModelVersionRequest( approval_status=approval_status, + evaluation_spec=evaluation_spec, format_type=format_type, framework_type=framework_type, inference_spec=inference_spec, @@ -223,6 +227,7 @@ def update_version( model_id: str, version: str, approval_status: str = None, + evaluation_spec: Dict[str, Any] = None, inference_spec: Dict[str, Any] = None, metrics: Dict[str, Any] = None, options: str = None, @@ -233,6 +238,7 @@ def update_version( ): request = UpdateModelVersionRequest( approval_status=approval_status, + evaluation_spec=evaluation_spec, inference_spec=inference_spec, metrics=metrics, options=options, diff --git a/pai/api/pipeline.py b/pai/api/pipeline.py index 2da666b..546f39e 100644 --- a/pai/api/pipeline.py +++ b/pai/api/pipeline.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging from typing import Any, Dict +from ..common.logging import get_logger from ..libs.alibabacloud_paiflow20210202.models import ( CreatePipelineRequest, CreatePipelineResponseBody, @@ -26,7 +26,7 @@ ) from .base import PaginatedResult, ServiceName, WorkspaceScopedResourceAPI -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class PipelineAPI(WorkspaceScopedResourceAPI): diff --git a/pai/api/service.py b/pai/api/service.py index 331df2c..da95be6 100644 --- a/pai/api/service.py +++ b/pai/api/service.py @@ -13,10 +13,10 @@ # limitations under the License. import json -import logging import typing from typing import Any, Dict, Union +from ..common.logging import get_logger from ..libs.alibabacloud_eas20210701.models import ( CreateServiceRequest, CreateServiceResponseBody, @@ -30,7 +30,7 @@ ) from .base import PaginatedResult, ResourceAPI, ServiceName -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class ServiceAPI(ResourceAPI): diff --git a/pai/api/tensorboard.py b/pai/api/tensorboard.py index d1bccf0..6dd7424 100644 --- a/pai/api/tensorboard.py +++ b/pai/api/tensorboard.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging from typing import Optional +from ..common.logging import get_logger from ..libs.alibabacloud_pai_dlc20201203.models import ( CreateTensorboardRequest, CreateTensorboardResponseBody, @@ -27,7 +27,7 @@ ) from .base import PaginatedResult, ServiceName, WorkspaceScopedResourceAPI -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class TensorBoardAPI(WorkspaceScopedResourceAPI): diff --git a/pai/api/training_job.py b/pai/api/training_job.py index 64dcc91..4ff1f86 100644 --- a/pai/api/training_job.py +++ b/pai/api/training_job.py @@ -19,11 +19,15 @@ AlgorithmSpec, CreateTrainingJobRequest, CreateTrainingJobRequestComputeResource, + CreateTrainingJobRequestComputeResourceInstanceSpec, + CreateTrainingJobRequestComputeResourceSpotSpec, + CreateTrainingJobRequestExperimentConfig, CreateTrainingJobRequestHyperParameters, CreateTrainingJobRequestInputChannels, CreateTrainingJobRequestLabels, CreateTrainingJobRequestOutputChannels, CreateTrainingJobRequestScheduler, + CreateTrainingJobRequestSettings, CreateTrainingJobRequestUserVpc, CreateTrainingJobResponseBody, GetTrainingJobRequest, @@ -35,7 +39,6 @@ class TrainingJobAPI(WorkspaceScopedResourceAPI): - BACKEND_SERVICE_NAME = ServiceName.PAI_STUDIO _list_method = "list_training_jobs_with_options" @@ -85,9 +88,15 @@ def create( instance_type, instance_count, job_name, + spot_spec: Optional[Dict[str, Any]] = None, + instance_spec: Optional[Dict[str, str]] = None, + resource_id: Optional[str] = None, + resource_type: Optional[str] = None, hyperparameters: Optional[Dict[str, Any]] = None, input_channels: Optional[List[Dict[str, Any]]] = None, output_channels: Optional[List[Dict[str, Any]]] = None, + environments: Dict[str, str] = None, + requirements: List[str] = None, labels: Optional[Dict[str, str]] = None, max_running_in_seconds: Optional[int] = None, description: Optional[str] = None, @@ -96,6 +105,8 @@ def create( algorithm_provider: Optional[str] = None, algorithm_spec: 
Optional[Dict[str, Any]] = None, user_vpc_config: Optional[Dict[str, Any]] = None, + experiment_config: Optional[Dict[str, Any]] = None, + settings: Optional[Dict[str, Any]] = None, ) -> str: """Create a TrainingJob.""" if algorithm_spec and ( @@ -119,10 +130,30 @@ def create( CreateTrainingJobRequestOutputChannels().from_map(ch) for ch in output_channels ] - compute_resource = CreateTrainingJobRequestComputeResource( - ecs_count=instance_count, - ecs_spec=instance_type, - ) + if instance_type: + spot_spec = ( + CreateTrainingJobRequestComputeResourceSpotSpec().from_map(spot_spec) + if spot_spec + else None + ) + compute_resource = CreateTrainingJobRequestComputeResource( + ecs_count=instance_count, + ecs_spec=instance_type, + # use_spot_instance=bool(spot_spec), + spot_spec=spot_spec, + ) + elif instance_spec: + compute_resource = CreateTrainingJobRequestComputeResource( + resource_id=resource_id, + instance_count=instance_count, + instance_spec=CreateTrainingJobRequestComputeResourceInstanceSpec().from_map( + instance_spec + ), + ) + else: + raise ValueError("Please provide instance_type or instance_spec.") + + hyperparameters = hyperparameters or dict() hyper_parameters = [ CreateTrainingJobRequestHyperParameters( name=name, @@ -150,6 +181,9 @@ def create( compute_resource=compute_resource, hyper_parameters=hyper_parameters, input_channels=input_channels, + resource_type=resource_type, + environments=environments, + python_requirements=requirements, labels=labels, output_channels=output_channels, scheduler=scheduler, @@ -157,6 +191,14 @@ def create( training_job_name=job_name, algorithm_spec=algo_spec, user_vpc=CreateTrainingJobRequestUserVpc().from_map(user_vpc_config), + experiment_config=CreateTrainingJobRequestExperimentConfig().from_map( + experiment_config + ), + settings=( + CreateTrainingJobRequestSettings().from_map(settings) + if settings + else None + ), ) resp: CreateTrainingJobResponseBody = self._do_request( @@ -186,4 +228,10 @@ def list_logs( 
training_job_id=training_job_id, request=request, ) - return self.make_paginated_result(resp) + # resp.logs may be None + logs = resp.logs or [] + total_count = resp.total_count or 0 + return PaginatedResult( + items=logs, + total_count=total_count, + ) diff --git a/pai/common/consts.py b/pai/common/consts.py index a5a46d1..e7f6200 100644 --- a/pai/common/consts.py +++ b/pai/common/consts.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import enum import os # Default path for pai config file @@ -19,6 +19,65 @@ "PAI_CONFIG_PATH", os.path.join(os.path.expanduser("~"), ".pai", "config.json") ) +# Default network type used to connect to PAI services +DEFAULT_NETWORK_TYPE = os.environ.get("PAI_NETWORK_TYPE", None) + +# PAI VPC endpoint +PAI_VPC_ENDPOINT = "pai-vpc.{}.aliyuncs.com" + +# All region list, https://help.aliyun.com/document_detail/40654.html +ALIYUN_ALL_REGION_ID_LIST = [ + "cn-qingdao", + "cn-beijing", + "cn-zhangjiakou", + "cn-huhehaote", + "cn-wulanchabu", + "cn-hangzhou", + "cn-shanghai", + "cn-nanjing", + "cn-fuzhou", + "cn-wuhan-lr", + "cn-shenzhen", + "cn-heyuan", + "cn-guangzhou", + "cn-chengdu", + "cn-hongkong", + "ap-southeast-1", + "ap-southeast-2", + "ap-southeast-3", + "ap-southeast-5", + "ap-southeast-6", + "ap-southeast-7", + "ap-northeast-1", + "ap-northeast-2", + "eu-west-1", + "us-east-1", + "eu-central-1", + "eu-west-1", + "me-east-1", + "me-central-1", + "cn-hangzhou-finance", + "cn-shanghai-finance-1", + "cn-shenzhen-finance-1", + "cn-beijing-finance-1", + "cn-north-2-gov-1", +] + + +class Network(enum.Enum): + VPC = "VPC" + PUBLIC = "PUBLIC" + + @classmethod + def from_string(cls, s: str) -> "Network": + try: + return cls[s.upper()] + except KeyError: + raise ValueError( + "Invalid network type: %s, supported types are: %s" + % (s, ", ".join(cls.__members__.keys())) + ) + class 
JobType(object): """PAI DLCJob/TrainingJob type.""" @@ -97,10 +156,36 @@ class FrameworkTypes(object): class FileSystemInputScheme(object): - # Standard/Extreme/CPFS 1.0 file system type NAS = "nas" # CPFS2.0 file system type CPFS = "cpfs" # BMCPFS file system type BMCPFS = "bmcpfs" + + +class DefaultChannelName(object): + MODEL = "model" + CHECKPOINT = "checkpoints" + TENSORBOARD = "tensorboard" + + +class StoragePathCategory(object): + """PAI builtin remote storage path.""" + + # For inference + InferenceSrc = "inference_src" + + # For evaluation + EvaluationSrc = "evaluation_src" + + # For training job + TrainingSrc = "training_src" + TrainingJob = "training_job" + TrainData = "train_data" + ModelData = "model_data" + + # For processing job + ProcessingJob = "processing_job" + ProcessingSrc = "processing_src" + InputData = "input_data" diff --git a/pai/common/docker_utils.py b/pai/common/docker_utils.py index 6f6507d..d584740 100644 --- a/pai/common/docker_utils.py +++ b/pai/common/docker_utils.py @@ -13,13 +13,14 @@ # limitations under the License. 
import io -import logging import subprocess import time from random import randint from typing import Any, Dict, List, Optional, Union -logger = logging.getLogger(__name__) +from .logging import get_logger + +logger = get_logger(__name__) def _run_command(command: List[str], input: Optional[str] = None): diff --git a/pai/common/git_utils.py b/pai/common/git_utils.py index 991fa5a..b41983f 100644 --- a/pai/common/git_utils.py +++ b/pai/common/git_utils.py @@ -14,7 +14,6 @@ from __future__ import absolute_import -import logging import os import subprocess import tempfile @@ -24,7 +23,9 @@ import six from six.moves import urllib -logger = logging.getLogger(__name__) +from .logging import get_logger + +logger = get_logger(__name__) def git_clone_repo(git_config: Dict[str, str], source_dir: Optional[str] = None): diff --git a/pai/common/logging.py b/pai/common/logging.py new file mode 100644 index 0000000..6799ac9 --- /dev/null +++ b/pai/common/logging.py @@ -0,0 +1,139 @@ +# Copyright 2023 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import os +import threading +from typing import Optional + +PAI_LOG_LEVEL = "PAI_LOG_LEVEL" + + +_lock = threading.Lock() +_default_handler: Optional[logging.Handler] = None + +_LOG_LEVEL_MAPPING = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + + +def _get_default_logging_level(): + """Get the default logging level for the pai library.""" + level_str = os.getenv(PAI_LOG_LEVEL, None) + if level_str: + if level_str.lower() in _LOG_LEVEL_MAPPING: + return _LOG_LEVEL_MAPPING[level_str.lower()] + else: + logging.getLogger().warning( + f"Unknown option PAI_LOG_LEVEL={level_str}, " + f"has to be one of: { ', '.join(_LOG_LEVEL_MAPPING.keys()) }" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + global _default_handler + + with _lock: + if _default_handler: + return + handler = logging.StreamHandler() + library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(handler) + library_root_logger.setLevel(_get_default_logging_level()) + library_root_logger.propagate = False + _default_handler = handler + + +def _reset_library_root_logger() -> None: + global _default_handler + + with _lock: + if not _default_handler: + return + library_root_logger = _get_library_root_logger() + library_root_logger.removeHandler(_default_handler) + library_root_logger.setLevel(logging.NOTSET) + _default_handler = None + + +def get_log_levels_dict(): + return _LOG_LEVEL_MAPPING + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """ + Return a logger with the specified module name. 
+ """ + + if name is None: + name = _get_library_name() + + _configure_library_root_logger() + return logging.getLogger(name) + + +def get_log_level() -> int: + """ + Return the current level for the "pai" module. + + Returns: + `int`: The logging level. + """ + + _configure_library_root_logger() + return _get_library_root_logger().getEffectiveLevel() + + +def set_log_level(verbosity: int) -> None: + """ + Set the log level for the pai root logger. + """ + + _configure_library_root_logger() + _get_library_root_logger().setLevel(verbosity) + + +def set_log_level_info(): + """Set the log level to the `INFO` level.""" + return set_log_level(logging.INFO) + + +def set_log_level_warning(): + """Set the log level to the `WARNING` level.""" + return set_log_level(logging.WARNING) + + +def set_log_level_debug(): + """Set the log level to the `DEBUG` level.""" + return set_log_level(logging.DEBUG) + + +def set_log_level_error(): + """Set the log level to the `ERROR` level.""" + return set_log_level(logging.ERROR) diff --git a/pai/common/oss_utils.py b/pai/common/oss_utils.py index 23a5066..908b8f5 100644 --- a/pai/common/oss_utils.py +++ b/pai/common/oss_utils.py @@ -15,7 +15,6 @@ from __future__ import absolute_import import glob -import logging import os.path import pathlib import tarfile @@ -29,7 +28,9 @@ from oss2.credentials import Credentials, CredentialsProvider from tqdm.autonotebook import tqdm -logger = logging.getLogger(__name__) +from .logging import get_logger + +logger = get_logger(__name__) class _ProgressCallbackTqdm(tqdm): @@ -449,7 +450,7 @@ def download( class CredentialProviderWrapper(CredentialsProvider): """A wrapper class for the credential provider of OSS.""" - def __init__(self, config: CredentialConfig): + def __init__(self, config: Union[CredentialConfig] = None): self.client = CredentialClient(config) def get_credentials(self) -> Credentials: diff --git a/pai/common/utils.py b/pai/common/utils.py index e5aada1..99d8f89 100644 --- 
a/pai/common/utils.py +++ b/pai/common/utils.py @@ -14,22 +14,28 @@ from __future__ import absolute_import +import functools +import importlib.util import random import re import socket import string import sys +import time +import warnings +from datetime import datetime from functools import lru_cache -from typing import Callable, Dict, Optional, Union +from typing import Callable, Dict, List, Optional, Union from semantic_version import Version -from pai import __version__ from pai.common.consts import ( + ALIYUN_ALL_REGION_ID_LIST, INSTANCE_TYPE_LOCAL, INSTANCE_TYPE_LOCAL_GPU, FileSystemInputScheme, ) +from pai.version import VERSION DEFAULT_PLAIN_TEXT_ALLOW_CHARACTERS = string.ascii_letters + string.digits + "_" @@ -88,12 +94,18 @@ def make_list_resource_iterator(method: Callable, **kwargs): kwargs.update(page_number=page_number, page_size=page_size) result = method(**kwargs) if isinstance(result, PaginatedResult): + total_count = result.total_count result = result.items + else: + total_count = None for item in result: yield item if len(result) == 0 or len(result) < page_size: return + if total_count and page_number * page_size >= total_count: + return + page_number += 1 @@ -106,7 +118,7 @@ def to_plain_text( def http_user_agent(user_agent: Optional[Union[Dict, str]] = None) -> str: """Generate HTTP User-Agent that represents current client.""" - ua = f"pai-python-sdk/{__version__}; python/{sys.version.split()[0]}" + ua = f"pai-python-sdk/{VERSION}; python/{sys.version.split()[0]}" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): @@ -117,12 +129,14 @@ def http_user_agent(user_agent: Optional[Union[Dict, str]] = None) -> str: def is_notebook() -> bool: """Return True if current environment is notebook.""" try: + from IPython import get_ipython + shell = get_ipython().__class__.__name__ for parent_cls in shell.__mro__: if parent_cls.__name__ == "ZMQInteractiveShell": return True 
return False - except NameError: + except (NameError, ImportError): return False @@ -213,6 +227,19 @@ def is_filesystem_uri(uri: str) -> bool: return any(uri.startswith(f"{schema}://") for schema in schemas) +def is_dataset_id(item: str) -> bool: + """Return True if given input is a dataset ID. + + Args: + item (str): user input dataset ID. + + Examples: + >>> is_dataset_id('d-ybko3rap60c4gs9flc') + True + """ + return item.startswith("d-") + + @lru_cache() def is_domain_connectable(domain: str, port: int = 80, timeout: int = 1) -> bool: """Check if the domain is connectable.""" @@ -233,3 +260,214 @@ def is_domain_connectable(domain: str, port: int = 80, timeout: int = 1) -> bool finally: # Close the socket sock.close() + + +def experimental(callable_entity): + """Decorator to mark functions or classes as experimental""" + + @functools.wraps(callable_entity) + def wrapper(*args, **kwargs): + message = f"{callable_entity.__name__} is experimental and may change or be removed in future releases." 
+ warnings.warn(message, category=FutureWarning, stacklevel=2) + return callable_entity(*args, **kwargs) + + return wrapper + + +def retry(max_attempts=3, wait_secs=1, exceptions=(Exception,), report_retries=True): + """Decorator to make functions retry by config""" + + def decorator_retry(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + attempts = 0 + while attempts < max_attempts: + try: + result = func(*args, **kwargs) + return result + except exceptions as e: + attempts += 1 + if attempts == max_attempts: + raise # Re-raise the last exception when all the attempts have failed + if report_retries: + warnings.warn(f"Retry {attempts}/{max_attempts} failed: {e}") + time.sleep(wait_secs) + + return wrapper + + return decorator_retry + + +def print_table(headers: List[str], rows: List[List[str]]): + """Give headers and rows, print as table to stdout.""" + + length = len(headers) + for row in rows: + if len(row) != length: + raise ValueError("Unable to print table, headers length mismatch with rows") + + column_widths = [ + max(len(str(value)) for value in column) for column in zip(headers, *rows) + ] + header_row = " | ".join( + f"{header:<{column_widths[i]}}" for i, header in enumerate(headers) + ) + + print(header_row) + print("-" * len(header_row)) + for row in rows: + print( + " | ".join( + f"{str(value):<{column_widths[i]}}" for i, value in enumerate(row) + ) + ) + + +def is_package_available(package_name: str) -> bool: + """Check if the package is available in the current environment.""" + return True if importlib.util.find_spec(package_name) is not None else False + + +def timestamp(sep: str = "-", utc: bool = False) -> str: + """Return a timestamp with millisecond precision. + + Args: + sep: The separator between date and time. + utc: Whether to use UTC time. + + Returns: + str: A timestamp with millisecond precision. 
+ + """ + if utc: + res = datetime.utcnow().strftime("%Y%m%d-%H%M%S-%f")[:-3] + else: + res = datetime.now().strftime("%Y%m%d-%H%M%S-%f")[:-3] + if sep != "-": + res = res.replace("-", sep) + return res + + +def name_from_base(base_name: str, sep: str = "-") -> str: + """Return a name with base_name and timestamp. + + Args: + base_name: The base name of the returned name. + sep: The separator between base_name and timestamp. + + Returns: + str: A name with base_name and timestamp. + + """ + return "{base_name}{sep}{timestamp}".format( + base_name=base_name, sep=sep, timestamp=timestamp(sep=sep, utc=False) + ) + + +def parse_region_id_from_endpoint(endpoint) -> str: + if endpoint: + for region_id in ALIYUN_ALL_REGION_ID_LIST: + if region_id in endpoint: + return region_id + return None + + +def parse_oss_uri(uri): + if uri.startswith("oss://"): + match = re.match(r"^oss://([^./]+)\.([^./]+)\.aliyuncs\.com(?:/(.+))?", uri) + if not match: + warnings.warn("Invalid OSS URI format.") + return None + bucket_name, endpoint, path = match.groups() + region_id = parse_region_id_from_endpoint(endpoint) + if not region_id: + warnings.warn("Invalid OSS URI format.") + return None + return bucket_name, region_id, "/" if path is None else path + return None + + +def parse_nas_uri(uri): + if uri.startswith("nas://"): + match = re.match(r"^nas://([^./]+)\.([^/]+)(?:/(.+))?", uri) + if not match: + warnings.warn("Invalid NAS URI format.") + return None + endpoint = match.groups()[1] + region_id = parse_region_id_from_endpoint(endpoint) + if not region_id: + warnings.warn("Invalid NAS URI format.") + return None + return uri, region_id + return None + + +def parse_cpfs_uri(uri): + if uri.startswith("cpfs://"): + match = re.match(r"^cpfs://([^./]+)\.([^/]+)(?:/(.+))?", uri) + if not match: + warnings.warn("Invalid CPFS URI format.") + return None + endpoint = match.groups()[1] + region_id = parse_region_id_from_endpoint(endpoint) + if not region_id: + warnings.warn("Invalid CPFS URI 
format.") + return None + return uri, region_id + return None + + +def parse_bmcpfs_uri(uri): + if uri.startswith("bmcpfs://"): + match = re.match(r"^bmcpfs://([^./]+)\.([^/]+)(?:/(.+))?", uri) + if not match: + warnings.warn("Invalid BMCPFS URI format.") + return None + endpoint = match.groups()[1] + region_id = parse_region_id_from_endpoint(endpoint) + if not region_id: + warnings.warn("Invalid BMCPFS URI format.") + return None + return uri, region_id + return None + + +def parse_local_file_uri(uri): + if uri.startswith("file:///"): + match = re.match(r"^file://(.+)", uri) + if not match: + warnings.warn("Invalid local file URI format.") + return None + return match.group(1) + return None + + +def parse_pai_dataset_uri(uri): + if uri.startswith("pai://datasets"): + match = re.match(r"^pai://datasets/([^/]+)(?:/(.+))?", uri) + if not match: + warnings.warn("Invalid PAI dataset URI format.") + return None + dataset_id, dataset_version = match.groups() + dataset_version = dataset_version if dataset_version else "1" + dataset_version = ( + dataset_version.split("/")[0] if "/" in dataset_version else dataset_version + ) + return dataset_id, dataset_version + return None + + +def parse_odps_uri(uri): + if uri.startswith("odps://"): + match = re.match(r"^odps://(.+)/tables/(.+)", uri) + if not match: + warnings.warn("Invalid MaxCompute URI format.") + return None + project_and_schema, table_name = match.groups() + project_name, schema = ( + project_and_schema.split("/") + if "/" in project_and_schema + else (project_and_schema, None) + ) + return project_name, schema, table_name + return None diff --git a/pai/dataset.py b/pai/dataset.py new file mode 100644 index 0000000..236d202 --- /dev/null +++ b/pai/dataset.py @@ -0,0 +1,41 @@ +# Copyright 2024 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, List, Optional + +from .common import ProviderAlibabaPAI +from .common.logging import get_logger +from .common.utils import make_list_resource_iterator +from .session import Session, get_default_session + +logger = get_logger(__name__) + + +def list_common_datasets( + name: str = None, + session: Optional[Session] = None, +) -> List[Dict[str, Any]]: + session = session or get_default_session() + + gen = make_list_resource_iterator( + session.dataset_api.list, + name=name, + provider=ProviderAlibabaPAI, + # set the workspace_id manually, prevent using the default workspace of the + # session. + workspace_id=0, + order="DESC", + ) + + return gen diff --git a/pai/estimator.py b/pai/estimator.py index 5441692..2ad604e 100644 --- a/pai/estimator.py +++ b/pai/estimator.py @@ -12,48 +12,39 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import distutils.dir_util -import json -import logging -import os -import posixpath -import re -import shlex -import shutil -import tempfile -import textwrap -import time import webbrowser from abc import ABCMeta, abstractmethod -from concurrent.futures import ThreadPoolExecutor from datetime import datetime from typing import Any, Dict, List, Optional, Union -from .api.entity_base import EntityBaseMixin -from .common import ProviderAlibabaPAI, git_utils -from .common.consts import INSTANCE_TYPE_LOCAL_GPU, FileSystemInputScheme, JobType -from .common.docker_utils import ContainerRun, run_container -from .common.oss_utils import OssUriObj, download, is_oss_uri, upload -from .common.utils import ( - is_filesystem_uri, - is_local_run_instance_type, - is_odps_table_uri, - make_list_resource_iterator, - random_str, - to_plain_text, +from .common import git_utils +from .common.consts import FileSystemInputScheme, JobType +from .common.logging import get_logger +from .common.utils import is_local_run_instance_type, make_list_resource_iterator +from .job import ( + AlgorithmSpec, + Channel, + HyperParameterDefinition, + LocalTrainingJob, + TrainingJob, + UriOutput, + _TrainingJobSubmitter, +) +from .job._training_job import ( + DEFAULT_CHECKPOINT_CHANNEL_NAME, + DEFAULT_OUTPUT_MODEL_CHANNEL_NAME, + DEFAULT_TENSORBOARD_CHANNEL_NAME, + ExperimentConfig, + ResourceType, + SpotSpec, + UserVpcConfig, ) -from .exception import UnexpectedStatusException from .model import InferenceSpec, Model, ResourceConfig from .predictor import Predictor -from .schema.training_job_schema import TrainingJobSchema from .serializers import SerializerBase from .session import Session, get_default_session -logger = logging.getLogger(__name__) - -DEFAULT_OUTPUT_MODEL_CHANNEL_NAME = "model" -DEFAULT_CHECKPOINT_CHANNEL_NAME = "checkpoints" -DEFAULT_TENSORBOARD_CHANNEL_NAME = "tensorboard" +logger = get_logger(__name__) class HyperParameterType(object): @@ -178,47 +169,7 @@ def to_input_uri(self): ) 
-class UserVpcConfig(object): - """UserVpcConfig is used to give training job access to resources in your VPC.""" - - def __init__( - self, - vpc_id: str, - security_group_id: str, - switch_id: Optional[str] = None, - extended_cidrs: List[str] = None, - ): - """Initialize UserVpcConfig. - - Args: - vpc_id (str): Specifies the ID of the VPC that training job instance - connects to. - security_group_id (str): The ID of the security group that training job - instances belong to. - switch_id (str, optional): The ID of the vSwitch to which the instance - belongs. Defaults to None. - extended_cidrs (List[str], optional): The CIDR blocks configured for the - ENI of the training job instance. If it is not specified, the CIDR block - will be configured as the same as the VPC network segmentation, which - means that the training job instance can access all resources in the - VPC. Defaults to None. - """ - - self.vpc_id = vpc_id - self.security_group_id = security_group_id - self.switch_id = switch_id - self.extended_cidrs = extended_cidrs - - def to_dict(self): - return { - "VpcId": self.vpc_id, - "SecurityGroupId": self.security_group_id, - "SwitchId": self.switch_id, - "ExtendedCIDRs": self.extended_cidrs, - } - - -class EstimatorBase(metaclass=ABCMeta): +class EstimatorBase(_TrainingJobSubmitter, metaclass=ABCMeta): """EstimatorBase is the base class for other Estimator classes, such as Estimator. 
The EstimatorBase class contains common attributes and methods for all estimators, @@ -235,9 +186,18 @@ def __init__( max_run_time: Optional[int] = None, output_path: Optional[str] = None, checkpoints_path: Optional[str] = None, + environments: Optional[Dict[str, str]] = None, + requirements: Optional[List[str]] = None, instance_type: Optional[str] = None, + spot_spec: Optional[SpotSpec] = None, + instance_spec: Optional[Dict] = None, + resource_id: Optional[Dict] = None, + resource_type: Optional[Union[str, ResourceType]] = None, instance_count: Optional[int] = None, user_vpc_config: Optional[UserVpcConfig] = None, + experiment_config: Optional[ExperimentConfig] = None, + settings: Optional[Dict[str, Any]] = None, + labels: Optional[Dict[str, str]] = None, session: Optional[Session] = None, ): """EstimatorBase constructor. @@ -289,12 +249,26 @@ def __init__( checkpoints_path (str, optional): An OSS URI that stores the checkpoint of the training job. If provided, the OSS URI will be mounted to the directory `/ml/output/checkpoints/`. + environments: A dictionary that maps environment variable names to their values. + This optional field allows you to provide a set of environment variables that will be + applied to the context where the code is executed. + requirements (list, optional): An optional list of strings that specifies the Python + package dependencies with their versions. Each string in the list should be in the format + 'package' or 'package==version'. This is similar to the contents of a requirements.txt file used + in Python projects. If requirements.txt is provided in user code directory, requirements + will override the conflict dependencies directly. + resource_type (str, optional): The resource type used to run the training job. + By default, general computing resource is used. If the resource_type is + 'Lingjun', Lingjun computing resource is used. instance_type (str, optional): The machine instance type used to run the training job. 
To view the supported machine instance types, please refer to the document: https://help.aliyun.com/document_detail/171758.htm#section-55y-4tq-84y. If the instance_type is "local", the training job is executed locally using docker. + spot_spec (:class:`pai.job.SpotSpec`, optional): The specification of the spot + instance used to run the training job. If provided, the training job will + use the spot instance to run the training job. instance_count (int): The number of machines used to run the training job. user_vpc_config (:class:`pai.estimator.UserVpcConfig`, optional): The VPC configuration used to enable the training job instance to connect to the @@ -302,22 +276,40 @@ def __init__( be created and attached to the training job instance, allowing the instance to access the resources within the specified VPC. Default to None. + experiment_config (:class:`pai.estimator.ExperimentConfig`, optional): The + experiment configuration used to construct the relationship between the + training job and the experiment. If provided, the training job will belong + to the specified experiment, in which case the training job will use + artifact_uri of experiment as default output path. Default to None. + settings (dict, optional): A dictionary that represents the additional settings + for job, such as AIMaster configurations. + labels (Dict[str, str], optional): A dictionary that maps label names to + their values. This optional field allows you to provide a set of labels + that will be applied to the training job. session (Session, optional): A PAI session instance used for communicating with PAI service. 
""" self.hyperparameters = hyperparameters or dict() - self.instance_type = instance_type - self.instance_count = instance_count if instance_count else 1 - self.max_run_time = max_run_time - self.base_job_name = base_job_name - self.output_path = output_path - self.user_vpc_config = user_vpc_config self.checkpoints_path = checkpoints_path self.session = session or get_default_session() - self._latest_training_job = None - - self._check_instance_type() + super().__init__( + base_job_name=base_job_name, + output_path=output_path, + experiment_config=experiment_config, + instance_type=instance_type, + instance_count=instance_count, + resource_id=resource_id, + resource_type=resource_type, + spot_spec=spot_spec, + instance_spec=instance_spec, + user_vpc_config=user_vpc_config, + max_run_time=max_run_time, + environments=environments, + requirements=requirements, + settings=settings, + labels=labels, + ) def set_hyperparameters(self, **kwargs): """Set hyperparameters for the training job. @@ -327,21 +319,6 @@ def set_hyperparameters(self, **kwargs): """ self.hyperparameters.update(**kwargs) - @property - def latest_training_job(self): - """Return the latest submitted training job.""" - return self._latest_training_job - - def _check_instance_type(self): - """Check if the given instance_type is supported for training job.""" - if not is_local_run_instance_type( - self.instance_type - ) and not self.session.is_supported_training_instance(self.instance_type): - raise ValueError( - f"Instance type='{self.instance_type}' is not supported." - " Please provide a supported instance type to create the job." 
- ) - def _gen_job_display_name(self, job_name=None): """Generate job display name.""" if job_name: @@ -349,143 +326,12 @@ def _gen_job_display_name(self, job_name=None): ts = datetime.now().strftime("%Y%m%d_%H%M%S") return "{}_{}".format(self.base_job_name or "training_job", ts) - def _get_input_uri(self, item: str): - """Get input uri for training_job from given input.""" - if not isinstance(item, (str, FileSystemInputBase)): - raise ValueError(f"Input data of type {type(item)} is not supported.") - - if isinstance(item, FileSystemInputBase): - input_uri = item.to_input_uri() - elif is_oss_uri(item) or is_filesystem_uri(item) or is_odps_table_uri(item): - input_uri = item - elif os.path.exists(item): - store_path = self.session.get_storage_path_by_category("train_data") - input_uri = upload(item, store_path) - else: - raise ValueError( - "Invalid input data, supported inputs are OSS, NAS, MaxCompute " - "table or local path." - ) - - return input_uri - - def _build_input_data_configs( - self, - inputs: Dict[str, Any] = None, - input_channel_defs: Optional[List[Dict[str, str]]] = None, - ) -> List[Dict[str, str]]: - """Build the input data config for the training job.""" - res = [] - - if input_channel_defs: - remains = set(inputs.keys()) - for channel in input_channel_defs: - channel_name = channel["Name"] - channel_required = channel["Required"] - if channel_name in inputs: - input_uri = self._get_input_uri(inputs[channel_name]) - res.append({"Name": channel_name, "InputUri": input_uri}) - remains.remove(channel_name) - elif channel_required: - raise ValueError( - f"Input channel {channel_name} is required but not provided." - " Please check the input channels definition." - ) - if remains: - raise ValueError( - f"Following input channels={list(remains)} are not defined in input" - " channels definition. Please check the input channels definition." 
- ) - else: - for name, item in inputs.items(): - input_uri = self._get_input_uri(item) - res.append({"Name": name, "InputUri": input_uri}) - - return res - - def _generate_job_base_output_path(self, job_name: str) -> str: - """Generate the base output path for the training job.""" - bucket = self.session.oss_bucket - bucket_name = bucket.bucket_name - # replace non-alphanumeric character in training job name. - name = to_plain_text(job_name) - - if self.output_path: - return os.path.join(self.output_path, f"{name}_{random_str(6)}") - else: - job_output_path = self.session.get_storage_path_by_category( - "training_job", f"{name}_{random_str(6)}" - ) - return f"oss://{bucket_name}/{job_output_path}" - - @classmethod - def _get_default_output_channel_defs(cls): - channel_defs = [ - { - "Name": DEFAULT_OUTPUT_MODEL_CHANNEL_NAME, - }, - { - "Name": DEFAULT_CHECKPOINT_CHANNEL_NAME, - }, - { - "Name": DEFAULT_TENSORBOARD_CHANNEL_NAME, - "Properties": { - "ossAppendable": "true", - }, - }, - ] - return channel_defs - - def _build_output_data_configs( - self, job_name: str, output_channel_defs: List[Dict[str, str]] - ) -> List[Dict[str, str]]: - """Build the output data config for the training job.""" - job_base_output_path = self._generate_job_base_output_path(job_name) - - # OSS URI for output channel will be mounted to directory - # "/ml/output/{ChannelName}/" and the output OSS URI should be a "directory" - def as_oss_dir_uri(uri: str): - return uri if uri.endswith("/") else uri + "/" - - res = [] - for ch in output_channel_defs: - # if checkpoint path is provided, use it as the checkpoint channel output. 
- if ch["Name"] == DEFAULT_CHECKPOINT_CHANNEL_NAME and self.checkpoints_path: - oss_uri = self.checkpoints_path - else: - oss_uri = as_oss_dir_uri( - posixpath.join(job_base_output_path, ch["Name"]) - ) - res.append( - { - "Name": ch["Name"], - "OutputUri": oss_uri, - } - ) - - return res - @abstractmethod def fit( self, inputs: Dict[str, Any] = None, wait: bool = True, show_logs: bool = True ): """Submit a training job with the given input data.""" - def wait(self, show_logs: bool = True): - """Block until the latest training job is completed. - - Args: - show_logs(bool): Specifies whether to fetch and print the logs produced by - the training job. - - Raises: - RuntimeError: If no training job is submitted. - - """ - if not self._latest_training_job: - raise RuntimeError("Could not find a submitted training job.") - self._latest_training_job.wait(show_logs=show_logs) - def model_data(self) -> str: """Model data output path. @@ -493,18 +339,18 @@ def model_data(self) -> str: str: A string in OSS URI format refers to the output model of the submitted job. """ - if not self._latest_training_job: + if not self.latest_job: raise RuntimeError( "No TrainingJob for the estimator, output model data not found." ) - if not self._latest_training_job.is_succeeded(): + if not self.latest_job.is_succeeded(): logger.warning( "The TrainingJob is currently not in a succeeded status, which means" " that the model data output may not be accessible." ) - return self._latest_training_job.output_path( + return self.latest_job.output_path( channel_name=DEFAULT_OUTPUT_MODEL_CHANNEL_NAME ) @@ -515,14 +361,12 @@ def checkpoints_data(self) -> str: str: A string in OSS URI format refers to the checkpoints of submitted training job. """ - if not self._latest_training_job: + if not self.latest_job: raise RuntimeError( "No TrainingJob for the Estimator, output checkpoints data path " "not found." 
) - return self._latest_training_job.output_path( - channel_name=DEFAULT_CHECKPOINT_CHANNEL_NAME - ) + return self.latest_job.output_path(channel_name=DEFAULT_CHECKPOINT_CHANNEL_NAME) def tensorboard_data(self) -> str: """Output TensorBoard logs path. @@ -531,15 +375,60 @@ def tensorboard_data(self) -> str: str: A string in OSS URI format refers to the tensorboard log of submitted training job. """ - if not self._latest_training_job: + if not self.latest_job: raise RuntimeError( "No TrainingJob for the Estimator, output TensorBoard logs data path" " not found." ) - return self._latest_training_job.output_path( + return self.latest_job.output_path( channel_name=DEFAULT_TENSORBOARD_CHANNEL_NAME, ) + def tensorboard(self, wait=True): + """Launch a TensorBoard Application to view the output TensorBoard logs. + + Args: + wait (bool): Specifies whether to block until the TensorBoard is running. + + Returns: + :class:`pai.tensorboard.TensorBoard`: A TensorBoard instance. + """ + from pai.tensorboard import TensorBoard + + if not self.latest_job: + raise RuntimeError("Could not find a submitted training job.") + + source_type = "TrainingJob" + if isinstance(self.latest_job, LocalTrainingJob): + raise RuntimeError("Local training job does not support tensorboard.") + res = self.session.tensorboard_api.list( + source_type=source_type, + source_id=self.latest_job.training_job_id, + ) + + if res.items: + if len(res.items) > 1: + logger.warning( + "Found multiple TensorBoard instances for the submitted training " + "job, use the first one." + ) + tb_id = res.items[0]["TensorboardId"] + tb = TensorBoard(tensorboard_id=tb_id, session=self.session) + tb.start(wait=wait) + else: + tb = TensorBoard.create( + uri=self.tensorboard_data(), + wait=wait, + display_name=self.latest_job.training_job_name, + source_id=self.latest_job.training_job_id, + source_type=source_type, + session=self.session, + ) + + # Open the TensorBoard in the default browser. 
+ webbrowser.open(tb.app_uri) + return tb + def create_model(self, inference_spec: Union[InferenceSpec, Dict]) -> Model: """Create a Model object using output model of the training job. @@ -647,11 +536,13 @@ class Estimator(EstimatorBase): def __init__( self, image_uri: str, - command: str, + command: Union[str, List[str]], source_dir: Optional[str] = None, git_config: Optional[Dict[str, str]] = None, job_type: str = JobType.PyTorchJob, hyperparameters: Optional[Dict[str, Any]] = None, + environments: Optional[Dict[str, str]] = None, + requirements: Optional[List[str]] = None, base_job_name: Optional[str] = None, max_run_time: Optional[int] = None, checkpoints_path: Optional[str] = None, @@ -660,7 +551,10 @@ def __init__( instance_type: Optional[str] = None, instance_count: Optional[int] = None, user_vpc_config: Optional[UserVpcConfig] = None, + experiment_config: Optional[ExperimentConfig] = None, + resource_id: Optional[str] = None, session: Optional[Session] = None, + **kwargs, ): """Estimator constructor. @@ -669,7 +563,7 @@ def __init__( provided by PAI or a user customized image. To view the images provided by PAI, please refer to the document: https://help.aliyun.com/document_detail/202834.htm. - command (str): The command used to run the training job. + command (Union[str, List[str]]): The command used to run the training job. source_dir (str, optional): The local source code directory used in the training job. The directory will be packaged and uploaded to an OSS bucket, then downloaded to the `/ml/usercode` directory in the training @@ -715,6 +609,14 @@ def __init__( hyperparameters used in the training job. The hyperparameters will be stored in the `/ml/input/config/hyperparameters.json` as a JSON dictionary in the training container. + environments: A dictionary that maps environment variable names to their values. + This optional field allows you to provide a set of environment variables that will be + applied to the context where the code is executed. 
+ requirements (list, optional): An optional list of strings that specifies the Python + package dependencies with their versions. Each string in the list should be in the format + 'package' or 'package==version'. This is similar to the contents of a requirements.txt file used + in Python projects. If requirements.txt is provided in user code directory, requirements + will override the conflict dependencies directly. instance_type (str): The machine instance type used to run the training job. To view the supported machine instance types, please refer to the document: @@ -736,6 +638,11 @@ def __init__( be created and attached to the training job instance, allowing the instance to access the resources within the specified VPC. Default to None. + experiment_config(:class:`pai.estimator.ExperimentConfig`, optional): The + experiment configuration used to construct the relationship between the + training job and the experiment. If provided, the training job will belong + to the specified experiment, in which case the training job will use + artifact_uri of experiment as default output path. Default to None. output_path (str, optional): An OSS URI to store the outputs of the training jobs. If not provided, an OSS URI will be generated using the default OSS bucket in the session. When the `estimator.fit` method is called, @@ -805,6 +712,8 @@ def __init__( super(Estimator, self).__init__( hyperparameters=hyperparameters, + environments=environments, + requirements=requirements, base_job_name=base_job_name, max_run_time=max_run_time, output_path=output_path, @@ -812,11 +721,11 @@ def __init__( instance_type=instance_type, instance_count=instance_count, user_vpc_config=user_vpc_config, + experiment_config=experiment_config, + resource_id=resource_id, session=session, ) - self.__uploaded_source_files = None - def training_image_uri(self) -> str: """Return the Docker image to use for training. 
@@ -837,69 +746,36 @@ def _prepare_for_training(self): ) self.source_dir = updated_args["source_dir"] - def _upload_source_files(self, job_name: str) -> Optional[str]: - """Upload local source files to OSS.""" - if not self.source_dir: - return - - if is_oss_uri(self.source_dir): - return self.source_dir - elif not os.path.exists(self.source_dir): - raise ValueError(f"Source directory {self.source_dir} does not exist.") - # compress the source files to a Tar Gz file and upload to OSS bucket. - upload_data_path = self.session.get_storage_path_by_category( - "training_src", to_plain_text(job_name) - ) - self.__uploaded_source_files = upload( - source_path=self.source_dir, - oss_path=upload_data_path, - bucket=self.session.oss_bucket, - is_tar=True, - ) - return self.__uploaded_source_files - - def _build_code_input(self, job_name: str) -> Optional[Dict[str, Any]]: - """Build a dict to represent AlgorithmSpecCodeDir used in the TrainingJob.""" - upload_source_files = self._upload_source_files(job_name) - if not upload_source_files: - return - oss_uri_obj = OssUriObj( - uri=self.session.patch_oss_endpoint(upload_source_files) - ) - - code_dir = { - "LocationType": "oss", - "LocationValue": { - "Bucket": oss_uri_obj.bucket_name, - "Key": oss_uri_obj.object_key, - "Endpoint": oss_uri_obj.endpoint, - }, - } - return code_dir - def _build_algorithm_spec( - self, - code_input, - ) -> Dict[str, Any]: + self, code_input, inputs: Dict[str, Any] + ) -> AlgorithmSpec: """Build a temporary AlgorithmSpec used for submitting the TrainingJob.""" - command = [ - "/bin/sh", - "-c", - self.command, - ] - algo_spec = { - "Command": command, - "Image": self.training_image_uri(), - "JobType": self.job_type, - "MetricDefinitions": self.metric_definitions, - "CodeDir": code_input, - "OutputChannels": self._get_default_output_channel_defs(), - } - return algo_spec + algorithm_spec = AlgorithmSpec( + command=( + self.command + if isinstance(self.command, list) + else ["sh", "-c", 
self.command] + ), + image=self.training_image_uri(), + job_type=self.job_type, + metric_definitions=self.metric_definitions, + code_dir=code_input, + output_channels=self._default_training_output_channels(), + input_channels=[ + Channel(name=channel_name, required=False) + for channel_name in inputs.keys() + ], + ) + return algorithm_spec def fit( - self, inputs: Dict[str, Any] = None, wait: bool = True, show_logs: bool = True - ): + self, + inputs: Dict[str, Any] = None, + outputs: Dict[str, Any] = None, + wait: bool = True, + show_logs: bool = True, + job_name: Optional[str] = None, + ) -> Union[TrainingJob, LocalTrainingJob]: """Submit a training job with the given input data. Args: @@ -908,119 +784,111 @@ def fit( the key is the channel name, and the value is the input data. The input data can be an OSS URI or a NAS URI object and will be mounted to the `/ml/input/data/{channel_name}` directory in the training container. + outputs (Dict[str, Any]): A dictionary representing the output locations for + the training job. Each key/value pair in the dictionary is an output channel, + the key is the channel name, and the value is the output data location. wait (bool): Specifies whether to block until the training job is completed, either succeeded, failed, or stopped. (Default True). show_logs (bool): Specifies whether to show the logs produced by the training job (Default True). + job_name (str, optional): The name of the training job. + + Returns: + :class:`pai.job.TrainingJob` or :class:`pai.job.LocalTrainingJob`: A + submitted training job. + Raises: UnExpectedStatusException: If the training job fails. 
""" inputs = inputs or dict() self._prepare_for_training() - job_name = self._gen_job_display_name() + job_name = self.job_name(job_name=job_name) if is_local_run_instance_type(self.instance_type): - training_job = self._local_run( - job_name=job_name, inputs=inputs, instance_type=self.instance_type - ) - else: - training_job = self._fit(inputs=inputs, job_name=job_name) - self._latest_training_job = training_job - - if wait: - self.wait(show_logs=show_logs) - - def tensorboard(self, wait=True): - """Launch a TensorBoard Application to view the output TensorBoard logs. - - Args: - wait (bool): Specifies whether to block until the TensorBoard is running. - - Returns: - :class:`pai.tensorboard.TensorBoard`: A TensorBoard instance. - """ - from pai.tensorboard import TensorBoard - - if not self.latest_training_job: - raise RuntimeError("Could not find a submitted training job.") - - source_type = "TrainingJob" - if isinstance(self.latest_training_job, _LocalTrainingJob): - raise RuntimeError("Local training job does not support tensorboard.") - res = self.session.tensorboard_api.list( - source_type=source_type, - source_id=self.latest_training_job.training_job_id, - ) - - if res.items: - if len(res.items) > 1: - logger.warning( - "Found multiple TensorBoard instances for the submitted training " - "job, use the first one." - ) - tb_id = res.items[0]["TensorboardId"] - tb = TensorBoard(tensorboard_id=tb_id, session=self.session) - tb.start(wait=wait) - else: - tb = TensorBoard.create( - uri=self.tensorboard_data(), + return self._local_run( + job_name=job_name, + inputs=inputs, + instance_type=self.instance_type, wait=wait, - display_name=self._latest_training_job.training_job_name, - source_id=self.latest_training_job.training_job_id, - source_type=source_type, - session=self.session, ) - - # Open the TensorBoard in the default browser. 
- webbrowser.open(tb.app_uri) - return tb - - def _fit(self, job_name, inputs: Dict[str, Any] = None): - input_configs = self._build_input_data_configs(inputs) - output_configs = self._build_output_data_configs( - job_name, output_channel_defs=self._get_default_output_channel_defs() + return self._fit( + inputs=inputs, + outputs=outputs, + job_name=job_name, + wait=wait, + show_logs=show_logs, ) + + def _fit( + self, + job_name, + inputs: Dict[str, Any], + outputs: Dict[str, Any], + wait: bool = True, + show_logs: bool = True, + ) -> TrainingJob: # prepare input code. - code_input = self._build_code_input(job_name) + code_input = self._build_code_input(job_name, source_dir=self.source_dir) algo_spec = self._build_algorithm_spec( code_input=code_input, + inputs=inputs, + ) + inputs = self.build_inputs( + inputs=inputs, + input_channels=algo_spec.input_channels, ) - training_job_id = self.session.training_job_api.create( - instance_count=self.instance_count, - instance_type=self.instance_type, + outputs = outputs or {} + if self.checkpoints_path: + outputs.update({DEFAULT_CHECKPOINT_CHANNEL_NAME: self.checkpoints_path}) + + outputs = self.build_outputs( job_name=job_name, - hyperparameters=self.hyperparameters, - max_running_in_seconds=self.max_run_time, - input_channels=input_configs, - output_channels=output_configs, - algorithm_spec=algo_spec, - user_vpc_config=self.user_vpc_config.to_dict() - if self.user_vpc_config - else None, + output_channels=algo_spec.output_channels, + outputs=outputs, ) - training_job = _TrainingJob.get(training_job_id) - print( - f"View the job detail by accessing the console URI: {training_job.console_uri}" + + return self._submit( + job_name=job_name, + algorithm_spec=algo_spec, + instance_spec=self.instance_spec, + instance_type=self.instance_type, + instance_count=self.instance_count, + resource_id=self.resource_id, + hyperparameters=self.hyperparameters, + environments=self.environments, + requirements=self.requirements, + 
max_run_time=self.max_run_time, + inputs=inputs, + outputs=outputs, + user_vpc_config=self.user_vpc_config if self.user_vpc_config else None, + experiment_config=( + self.experiment_config if self.experiment_config else None + ), + labels=self.labels, + wait=wait, + show_logs=show_logs, ) - return training_job def _local_run( self, job_name, instance_type: str, inputs: Dict[str, Any] = None, - ) -> "_LocalTrainingJob": + wait: bool = True, + ) -> "LocalTrainingJob": if self.instance_count > 1: raise RuntimeError("Local training job only supports single instance.") - training_job = _LocalTrainingJob( + training_job = LocalTrainingJob( estimator=self, inputs=inputs, job_name=job_name, instance_type=instance_type, ) training_job.run() + if wait: + training_job.wait() return training_job @@ -1063,8 +931,10 @@ def __init__( algorithm_name: Optional[str] = None, algorithm_version: Optional[str] = None, algorithm_provider: Optional[str] = None, - algorithm_spec: Optional[Dict[str, Any]] = None, + algorithm_spec: Optional[AlgorithmSpec] = None, hyperparameters: Optional[Dict[str, Any]] = None, + environments: Optional[Dict[str, str]] = None, + requirements: Optional[List[str]] = None, base_job_name: Optional[str] = None, max_run_time: Optional[int] = None, output_path: Optional[str] = None, @@ -1072,6 +942,8 @@ def __init__( instance_count: Optional[int] = None, user_vpc_config: Optional[UserVpcConfig] = None, session: Optional[Session] = None, + instance_spec: Optional[Dict[str, Union[int, str]]] = None, + **kwargs, ): """Initialize an AlgorithmEstimator. @@ -1086,11 +958,19 @@ def __init__( a PAI official algorithm. If not provided, the default provider is user's PAI account. If algorithm name is not provided, this argument will be ignored. - algorithm_spec (Dict[str, Any], optional): A temporary algorithm spec. + algorithm_spec (AlgorithmSpec, optional): A temporary algorithm spec. Required if algorithm_name is not provided. 
hyperparameters (dict, optional): A dictionary that represents the hyperparameters used in the training job. Default hyperparameters will be retrieved from the algorithm definition. + environments: A dictionary that maps environment variable names to their values. + This optional field allows you to provide a set of environment variables that will be + applied to the context where the code is executed. + requirements (list, optional): An optional list of strings that specifies the Python + package dependencies with their versions. Each string in the list should be in the format + 'package' or 'package==version'. This is similar to the contents of a requirements.txt file used + in Python projects. If requirements.txt is provided in user code directory, requirements + will override the conflict dependencies directly. base_job_name (str, optional): The base name used to generate the training job name. If not provided, a default job name will be generated. max_run_time (int, optional): The maximum time in seconds that the training @@ -1134,7 +1014,9 @@ def __init__( algorithm_version=algorithm_version, algorithm_provider=algorithm_provider, ) - self._algo_spec = _algo_version["AlgorithmSpec"] + self._algo_spec = AlgorithmSpec.model_validate( + _algo_version["AlgorithmSpec"] + ) self.algorithm_name = _algo_version["AlgorithmName"] self.algorithm_version = _algo_version["AlgorithmVersion"] self.algorithm_provider = _algo_version["AlgorithmProvider"] @@ -1147,17 +1029,21 @@ def __init__( self.algorithm_provider = None self.algorithm_spec = algorithm_spec + if not instance_type and not instance_spec: + instance_type = self._get_default_training_instance_type() super(AlgorithmEstimator, self).__init__( hyperparameters=self._get_hyperparameters(hyperparameters), + environments=environments, + requirements=requirements, base_job_name=base_job_name, max_run_time=max_run_time, output_path=output_path, - instance_type=instance_type - if instance_type - else 
self._get_default_training_instance_type(), + instance_type=instance_type, instance_count=instance_count, session=session, user_vpc_config=user_vpc_config, + instance_spec=instance_spec, + **kwargs, ) # TODO: check if the hyperparameters are valid @@ -1166,37 +1052,32 @@ def set_hyperparameters(self, **kwargs): super(AlgorithmEstimator, self).set_hyperparameters(**kwargs) @property - def hyperparameter_definitions(self) -> List[Dict[str, Any]]: + def hyperparameter_definitions(self) -> List[HyperParameterDefinition]: """Get the hyperparameter definitions from the algorithm spec.""" - res = self._algo_spec.get("HyperParameters", []) + res = self._algo_spec.hyperparameter_definitions return res @property - def input_channel_definitions(self) -> List[Dict[str, Any]]: + def input_channel_definitions(self) -> List[Channel]: """Get the input channel definitions from the algorithm spec.""" - res = self._algo_spec.get("InputChannels", []) + res = self._algo_spec.input_channels return res @property - def output_channel_definitions(self) -> List[Dict[str, Any]]: + def output_channel_definitions(self) -> List[Channel]: """Get the output channel definitions from the algorithm spec.""" - res = self._algo_spec.get("OutputChannels", []) + res = self._algo_spec.output_channels return res @property def supported_instance_types(self) -> List[str]: """Get the supported instance types from the algorithm spec.""" - res = ( - self._algo_spec["SupportedInstanceTypes"] - if "SupportedInstanceTypes" in self._algo_spec - else [] - ) - return res + return self._algo_spec.supported_instance_types def _check_args( self, algorithm_name: str, - algorithm_spec: Dict[str, Any], + algorithm_spec: Optional[AlgorithmSpec], ): """Check the algorithm_name and algorithm_spec. @@ -1206,7 +1087,7 @@ def _check_args( Args: algorithm_name (str): The name of the algorithm. - algorithm_spec (dict): The algorithm spec. + algorithm_spec (AlgorithmSpec): The algorithm spec. 
""" if not algorithm_name and not algorithm_spec: raise ValueError( @@ -1220,14 +1101,6 @@ def _check_args( " The provided algorithm_spec will be ignored." ) - def _check_instance_type(self): - """Check if the given instance_type is supported for training job.""" - if not self.session.is_supported_training_instance(self.instance_type): - raise ValueError( - f"Instance type='{self.instance_type}' is not supported." - " Please provide a supported instance type to create the job." - ) - def _get_algo_version( self, algorithm_name: str, @@ -1299,9 +1172,9 @@ def _get_hyperparameters( if hps_def: # Get default hyperparameters. for hp in hps_def: - hp_name = hp.get("Name") - hp_value = hp.get("DefaultValue", "") - hp_type = hp.get("Type", "String") + hp_name = hp.name + hp_value = hp.default_value + hp_type = hp.type or "String" # For hyperparameters with type INT or FLOAT, if the default value is # empty, skip it. if ( @@ -1351,8 +1224,13 @@ def _get_default_training_instance_type(self) -> str: return machine_spec["InstanceType"] def fit( - self, inputs: Dict[str, Any] = None, wait: bool = True, show_logs: bool = True - ): + self, + inputs: Dict[str, Any] = None, + outputs: Dict[str, Any] = None, + wait: bool = True, + show_logs: bool = True, + job_name: Optional[str] = None, + ) -> TrainingJob: """Submit a training job with the given input data. Args: @@ -1363,50 +1241,53 @@ def fit( `/ml/input/data/{channel_name}` directory in the training container. wait (bool): Specifies whether to block until the training job is completed, either succeeded, failed, or stopped. (Default True). - show_logs (bool): Specifies whether to show the logs produced by the - training job (Default True). + show_logs (bool): Whether to show the logs of the training job. Default to True. + Note that the logs will be shown only when the `wait` is set to True. + job_name (str, optional): The name of the training job. + + Returns: + :class:`pai.training_job.TrainingJob`: The submitted training job. 
+ Raises: UnExpectedStatusException: If the training job fails. """ - inputs = inputs or dict() - job_name = self._gen_job_display_name() - training_job = self._fit(inputs=inputs, job_name=job_name) - self._latest_training_job = training_job - - if wait: - self.wait(show_logs=show_logs) - - def _fit(self, job_name, inputs: Dict[str, Any] = None): - input_configs = self._build_input_data_configs( - inputs, input_channel_defs=self.input_channel_definitions + job_name = self.job_name(job_name=job_name) + input_configs = self.build_inputs( + inputs, + input_channels=self._algo_spec.input_channels, ) - output_configs = self._build_output_data_configs( - job_name, output_channel_defs=self.output_channel_definitions + output_configs = self.build_outputs( + job_name, + output_channels=self._algo_spec.output_channels, + outputs=outputs, ) - - training_job_id = self.session.training_job_api.create( + return self._submit( instance_count=self.instance_count, instance_type=self.instance_type, + instance_spec=self.instance_spec, + resource_id=self.resource_id, job_name=job_name, hyperparameters=self.hyperparameters, - max_running_in_seconds=self.max_run_time, - input_channels=input_configs, - output_channels=output_configs, + max_run_time=self.max_run_time, + inputs=input_configs, + outputs=output_configs, + environments=self.environments, + requirements=self.requirements, algorithm_name=self.algorithm_name, algorithm_version=self.algorithm_version, algorithm_provider=self.algorithm_provider, algorithm_spec=self.algorithm_spec, - user_vpc_config=self.user_vpc_config.to_dict() - if self.user_vpc_config - else None, - ) - training_job = _TrainingJob.get(training_job_id) - print( - f"View the job detail by accessing the console URI:" - f" {training_job.console_uri}" + user_vpc_config=( + self.user_vpc_config.to_dict() if self.user_vpc_config else None + ), + experiment_config=( + self.experiment_config.to_dict() if self.experiment_config else None + ), + labels=self.labels, + 
wait=wait, + show_logs=show_logs, ) - return training_job def get_outputs_data(self) -> Dict[str, str]: """Show all outputs data paths. @@ -1414,523 +1295,27 @@ def get_outputs_data(self) -> Dict[str, str]: Returns: dict[str, str]: A dictionary of all outputs data paths. """ - if not self._latest_training_job: + if not self.latest_job: raise RuntimeError( - "No TrainingJob for the estimator, output checkpoints data not found." - ) - - return { - ch["Name"]: ch["OutputUri"] - for ch in self._latest_training_job.output_channels - } - - -_TRAINING_LAUNCH_SCRIPT_TEMPLATE = textwrap.dedent( - """\ -#!/bin/sh - -env - -# change to working directory -if [ -n "$PAI_WORKING_DIR" ]; then - echo "Change to Working Directory", $PAI_WORKING_DIR - mkdir -p $PAI_WORKING_DIR && cd $PAI_WORKING_DIR -fi - -# install requirements -if [ -e "requirements.txt" ]; then - echo "Installing dependencies from requirements.txt" - python -m pip install -r requirements.txt -fi - -echo "User program launching" -echo "-----------------------------------------------------------------" - -sh {0} -""" -) - - -class _TrainingEnv(object): - ENV_PAI_HPS = "PAI_HPS" - ENV_PAI_HPS_PREFIX = "PAI_HPS_" - ENV_PAI_USER_ARGS = "PAI_USER_ARGS" - ENV_PAI_INPUT_PREFIX = "PAI_INPUT_" - ENV_PAI_OUTPUT_PREFIX = "PAI_OUTPUT_" - ENV_PAI_WORKING_DIR = "PAI_WORKING_DIR" - - -class _TrainingJobConfig(object): - WORKING_DIR = "/ml/usercode/" - INPUT_CONFIG_DIR = "/ml/input/config/" - INPUT_DATA_DIR = "/ml/input/data/" - OUTPUT_DIR = "/ml/output/" - - -_ENV_NOT_ALLOWED_CHARS = re.compile(r"[^a-zA-Z0-9_]") - - -class _LocalTrainingJob(object): - """A class that represents a local training job running with docker container.""" - - def __init__( - self, - estimator: Estimator, - inputs: Dict[str, Any], - instance_type: str = None, - temp_dir: str = None, - job_name: str = None, - ): - self.estimator = estimator - self.inputs = inputs - self.tmp_dir = temp_dir or tempfile.mkdtemp() - self.job_name = job_name - 
self.instance_type = instance_type - logger.info("Local TrainingJob temporary directory: {}".format(self.tmp_dir)) - self._container_run: ContainerRun = None - - def __str__(self): - return self.__repr__() - - def __repr__(self): - if self._container_run: - container = self._container_run.container - container_name, container_id, status = ( - container.name, - container.id, - container.status, - ) - else: - container_name, container_id, status = None, None, None - return f"LocalTrainingJob(container_name={container_name}, container_id={container_id}, status={status})" - - @property - def session(self) -> Session: - return self.estimator.session - - def prepare_env(self) -> Dict[str, str]: - """Prepare environment variables for the training job.""" - - # Hyperparameters environment variables - def _normalize_name(name: str) -> str: - # replace all non-alphanumeric characters with underscore - return _ENV_NOT_ALLOWED_CHARS.sub("_", name).upper() - - env = {} - user_args = [] - for name, value in self.estimator.hyperparameters.items(): - env[_TrainingEnv.ENV_PAI_HPS_PREFIX + _normalize_name(name)] = str(value) - user_args.extend(["--" + name, shlex.quote(str(value))]) - env[_TrainingEnv.ENV_PAI_USER_ARGS] = shlex.join(user_args) - env[_TrainingEnv.ENV_PAI_HPS] = json.dumps( - {name: str(value) for name, value in self.estimator.hyperparameters.items()} - ) - - # Environments for input channel - for name, value in self.inputs.items(): - if (is_oss_uri(value) and value.endswith("/")) or os.path.isdir(value): - env[ - _TrainingEnv.ENV_PAI_INPUT_PREFIX + _normalize_name(name) - ] = posixpath.join(_TrainingJobConfig.INPUT_DATA_DIR, name) - else: - file_name = os.path.basename(value) - env[ - _TrainingEnv.ENV_PAI_INPUT_PREFIX + _normalize_name(name) - ] = posixpath.join(_TrainingJobConfig.INPUT_DATA_DIR, name, file_name) - - # Environments for output channel. 
- # By default, TrainingJob invoked by Estimator will have two output channels: - # 'model' and 'checkpoints' - output_channel = ["model", "checkpoints"] - for name in output_channel: - env[ - _TrainingEnv.ENV_PAI_OUTPUT_PREFIX + _normalize_name(name) - ] = posixpath.join(_TrainingJobConfig.OUTPUT_DIR, name) - - env[_TrainingEnv.ENV_PAI_WORKING_DIR] = _TrainingJobConfig.WORKING_DIR - return env - - def run(self): - """Run estimator job in local with docker.""" - output_model_path = self.output_path() - os.makedirs(output_model_path, exist_ok=True) - volumes = {} - - tmp_dir = tempfile.mkdtemp() - # 1. Prepare source code to directory /ml/usercode - user_code_dir = os.path.join(self.tmp_dir, "user_code") - if is_oss_uri(self.estimator.source_dir): - raise RuntimeError("OSS source code is not supported in local training.") - shutil.copytree(self.estimator.source_dir, user_code_dir) - volumes[user_code_dir] = { - "bind": _TrainingJobConfig.WORKING_DIR, - "mode": "rw", - } - - # 2. Prepare input data for training job. - input_data = self.prepare_input_data() - for host_path, container_path in input_data.items(): - volumes[host_path] = { - "bind": container_path, - "mode": "rw", - } - - # 3. Prepare input config files, such as hyperparameters.json, - # training-job.json, etc. 
- input_config_path = os.path.join(tmp_dir, "config") - os.makedirs(input_config_path, exist_ok=True) - self.prepare_input_config(input_config_path=input_config_path) - volumes[input_config_path] = { - "bind": _TrainingJobConfig.INPUT_CONFIG_DIR, - "mode": "rw", - } - - execution_dir = os.path.join(tmp_dir, "config", "execution") - os.makedirs(execution_dir, exist_ok=True) - command_path = os.path.join(execution_dir, "command.sh") - with open(command_path, "w") as f: - f.write(self.estimator.command) - launch_script_path = os.path.join(input_config_path, "launch.sh") - with open(launch_script_path, "w") as f: - f.write( - _TRAINING_LAUNCH_SCRIPT_TEMPLATE.format( - posixpath.join( - _TrainingJobConfig.INPUT_CONFIG_DIR, "execution/command.sh" - ) - ) + "Could not find a submitted training job. Please submit a training job" + " before calling this method." ) - # 4. Config output model channel - volumes[output_model_path] = { - "bind": posixpath.join(_TrainingJobConfig.OUTPUT_DIR, "model"), - "mode": "rw", - } - - gpu_count = ( - -1 if self.instance_type.strip() == INSTANCE_TYPE_LOCAL_GPU else None - ) - self._container_run = run_container( - environment_variables=self.prepare_env(), - image_uri=self.estimator.image_uri, - entry_point=[ - "/bin/sh", - posixpath.join(_TrainingJobConfig.INPUT_CONFIG_DIR, "launch.sh"), - ], - volumes=volumes, - working_dir=_TrainingJobConfig.WORKING_DIR, - gpu_count=gpu_count, - ) - - def prepare_input_config(self, input_config_path): - """Prepare input config for TrainingJob, such as hyperparameters.json, - trainingjob.json.""" - with open(os.path.join(input_config_path, "hyperparameters.json"), "w") as f: - hps = self.estimator.hyperparameters or dict() - f.write(json.dumps({k: str(v) for k, v in hps.items()})) - - def prepare_input_data(self) -> Dict[str, str]: - """Prepare input data config.""" - input_data_configs = {} - - for name, input_data in self.inputs.items(): - local_channel_path = os.path.join(self.tmp_dir, 
f"input/data/{name}") - os.makedirs(local_channel_path, exist_ok=True) - input_data_configs[local_channel_path] = posixpath.join( - _TrainingJobConfig.INPUT_DATA_DIR, name - ) - if is_oss_uri(input_data): - oss_uri_obj = OssUriObj(input_data) - oss_bucket = self.session.get_oss_bucket(oss_uri_obj.bucket_name) - os.makedirs(local_channel_path, exist_ok=True) - download( - oss_uri_obj.object_key, - local_path=local_channel_path, - bucket=oss_bucket, - ) - input_data_configs[local_channel_path] = posixpath.join( - _TrainingJobConfig.INPUT_DATA_DIR, name - ) - else: - # If the input data is local files, copy the input data to a - # temporary directory. - if not os.path.exists(input_data): - raise ValueError( - "Input data not exists: name={} input_data={}".format( - name, input_data - ) - ) - elif os.path.isdir(input_data): - distutils.dir_util.copy_tree(input_data, local_channel_path) - else: - shutil.copy( - input_data, - os.path.join(local_channel_path, os.path.basename(input_data)), - ) - - return input_data_configs - - def wait(self, show_logs: bool = True): - self._container_run.watch(show_logs=show_logs) - - def output_path(self, channel_name="model"): - return os.path.join(self.tmp_dir, "output", f"{channel_name}/") - - def is_succeeded(self): - """Return True if the training job is succeeded, otherwise return False.""" - return self._container_run.is_succeeded() - - -class TrainingJobStatus(object): - CreateFailed = "CreateFailed" - InitializeFailed = "InitializeFailed" - Succeed = "Succeed" - Failed = "Failed" - Terminated = "Terminated" - Creating = "Creating" - Created = "Created" - Initializing = "Initializing" - Submitted = "Submitted" - Running = "Running" - - @classmethod - def completed_status(cls): - return [ - cls.InitializeFailed, - cls.Succeed, - cls.Failed, - cls.Terminated, + uri_outputs = [ + output + for output in self.latest_job.outputs + if isinstance(output, UriOutput) ] - - @classmethod - def failed_status(cls): - return [ - 
cls.InitializeFailed, - cls.Failed, - cls.CreateFailed, + extra_outputs = [ + output + for output in self.latest_job.outputs + if not isinstance(output, UriOutput) ] - -class TrainingJobChannel(object): - def __init__(self, dataset_id=None, input_uri=None, name=None): - self.dataset_id = dataset_id - self.input_uri = input_uri - self.name = name - - -class _TrainingJob(EntityBaseMixin): - _schema_cls = TrainingJobSchema - - def __init__( - self, - algorithm_name=None, - algorithm_version="1.0.0", - algorithm_provider=ProviderAlibabaPAI, - hyperparameters: Dict[str, Any] = None, - training_job_name: str = None, - instance_type: str = None, - instance_count: int = None, - output_channels: List[Dict[str, str]] = None, - input_channels: List[Dict[str, str]] = None, - labels: Dict[str, str] = None, - max_running_time_in_seconds: int = None, - description: str = None, - session: Session = None, - **kwargs, - ): - super(_TrainingJob, self).__init__(session=session, **kwargs) - session = session or get_default_session() - self.algorithm_name = algorithm_name - self.algorithm_version = algorithm_version - self.algorithm_provider = algorithm_provider - self.training_job_name = training_job_name - self.description = description - self.labels = labels - self.hyperparameters = hyperparameters - self.input_channels = input_channels - self.output_channels = output_channels - self.instance_type = instance_type - self.instance_count = instance_count - self.max_running_time_in_seconds = max_running_time_in_seconds - - # Load only fields - self.create_time = kwargs.pop("create_time", None) - self.modified_time = kwargs.pop("modified_time", None) - self.reason_code = kwargs.pop("reason_code", None) - self.reason_message = kwargs.pop("reason_message", None) - self.status = kwargs.pop("status", None) - self.status_transitions = kwargs.pop("status_transitions", None) - self.training_job_id = kwargs.pop("training_job_id", None) - self.training_job_url = kwargs.pop("training_job_url", 
None) - - def __repr__(self): - return "TrainingJob(id={})".format(self.training_job_id) - - def __str__(self): - return self.__repr__() - - @property - def id(self): - return self.training_job_id - - @classmethod - def get(cls, training_job_id, session: Session = None) -> "_TrainingJob": - session = session or get_default_session() - res = session.training_job_api.get(training_job_id=training_job_id) - return cls.from_api_object(res, session=session) - - @classmethod - def list( - cls, - status=None, - session: Session = None, - page_size=50, - page_number=1, - ): - session = session or get_default_session() - res = session.training_job_api.list( - status=status, page_size=page_size, page_number=page_number - ) - return [cls.from_api_object(item, session=session) for item in res.items] - - def output_path(self, channel_name="model"): - for output_channel in self.output_channels: - if output_channel["Name"] == channel_name: - return output_channel["OutputUri"] - raise RuntimeError( - f"Output channel is not specified: channel_name={channel_name}" - ) - - @property - def console_uri(self): - if not self.training_job_id: - raise ValueError("The TrainingJob is not submitted") - - return self.training_job_url - - def wait(self, interval=2, show_logs: bool = True): - self.session.training_job_api.refresh_entity(self.training_job_id, self) - - if show_logs: - job_log_printer = _TrainingJobLogPrinter( - training_job_id=self.training_job_id, page_size=20, session=self.session - ) - job_log_printer.start() - else: - job_log_printer = None - try: - while self.status not in TrainingJobStatus.completed_status(): - time.sleep(interval) - self.session.training_job_api.refresh_entity(self.training_job_id, self) - finally: - if job_log_printer: - job_log_printer.stop(wait=True) - - self._on_job_completed() - - def _on_job_completed(self): - # Print an empty line to separate the training job logs and the following logs - print() - if self.status == TrainingJobStatus.Succeed: - 
print( - f"Training job ({self.training_job_id}) succeeded, you can check the" - f" logs/metrics/output in the console:\n{self.console_uri}" - ) - elif self.status == TrainingJobStatus.Terminated: - print( - f"Training job is ended with status {self.status}: " - f"reason_code={self.reason_code}, reason_message={self.reason_message}." - f"Check the training job in the console:\n{self.console_uri}" - ) - elif self.status in TrainingJobStatus.failed_status(): - print( - f"Training job ({self.training_job_id}) failed, please check the logs" - f" in the console: \n{self.console_uri}" - ) - - message = f"TrainingJob failed: name={self.training_job_name}, " - f"training_job_id={self.training_job_id}, " - f"reason_code={self.reason_code}, status={self.status}, " - f"reason_message={self.reason_message}" - - raise UnexpectedStatusException(message=message, status=self.status) - - def _reload(self): - """Reload the training job from the PAI Service,""" - self.session.training_job_api.refresh_entity(self.training_job_id, self) - - def is_succeeded(self): - """Return True if the training job is succeeded""" - self._reload() - return self.status == TrainingJobStatus.Succeed - - -class _TrainingJobLogPrinter(object): - """A class used to print logs for a training job""" - - executor = ThreadPoolExecutor(5) - - def __init__( - self, training_job_id: str, page_size=10, session: Optional[Session] = None - ): - self.training_job_id = training_job_id - self.session = session - self.page_size = page_size - self._future = None - self._stop = False - - def _list_logs(self): - page_number, page_offset = 1, 0 - # print training job logs. - while not self._stop: - res = self.session.training_job_api.list_logs( - self.training_job_id, - page_number=page_number, - page_size=self.page_size, - ) - # 1. move to next page - if len(res.items) == self.page_size: - # print new logs starting from page_offset - self._print_logs(logs=res.items[page_offset:]) - page_number += 1 - page_offset = 0 - # 2. 
stay at the current page. - else: - if len(res.items) > page_offset: - # print new logs starting from page_offset - self._print_logs(logs=res.items[page_offset:]) - page_offset = len(res.items) - time.sleep(1) - - # When _stop is True, wait and print remaining logs. - time.sleep(10) - while True: - res = self.session.training_job_api.list_logs( - self.training_job_id, - page_number=page_number, - page_size=self.page_size, + if extra_outputs: + logger.warning( + "Extra outputs are provided in the training job, but only URI outputs" + " are supported. The extra outputs will be ignored: %s", + extra_outputs, ) - # There maybe more logs in the next page - if len(res.items) == self.page_size: - self._print_logs(logs=res.items[page_offset:]) - page_number += 1 - page_offset = 0 - # No more logs in the next page. - else: - if len(res.items) > page_offset: - self._print_logs(logs=res.items[page_offset:]) - break - - def _print_logs(self, logs: List[str]): - for log in logs: - print(log) - - def start(self): - if self._future: - raise ValueError("The training job log printer is already started") - self._stop = False - self._future = self.executor.submit(self._list_logs) - - def stop(self, wait: bool = True): - self._stop = True - if self._future: - self._future.result() + return {ch.name: ch.output_uri for ch in uri_outputs} diff --git a/pai/experiment.py b/pai/experiment.py new file mode 100644 index 0000000..7f1f1c0 --- /dev/null +++ b/pai/experiment.py @@ -0,0 +1,238 @@ +# Copyright 2023 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

import webbrowser
from typing import Iterator, Optional

from .common.logging import get_logger
from .session import Session, get_default_session
from .tensorboard import TensorBoard

logger = get_logger(__name__)


class Experiment(object):
    """An experiment is a collection of runs.

    An experiment can be used to compare the performance of different training
    jobs: the metrics of all training jobs in the same experiment can be
    compared with one single TensorBoard.

    Create an experiment by calling :meth:`Experiment.create`. When a training
    job is created with an ``ExperimentConfig`` referring to this experiment,
    the job uses the experiment's ``artifact_uri`` as its default output path,
    so the output path of the training job does not need to be specified.

    Example::

        experiment = Experiment.create(
            artifact_uri="oss://bucket/path",
            name="my_experiment",
        )
        est = Estimator(
            source_dir="./train/src/",
            command="python train.py",
            image_uri=training_image_uri,
            instance_type="ecs.c6.xlarge",
            hyperparameters={
                "n_estimators": 50
            },
            experiment_config=ExperimentConfig(
                experiment_name="my_experiment",
            ),
        )

        est.fit(inputs={
            "train": "oss://{YOUR_BUCKET_NAME}/path/to/train-data",
            "test": "oss://{YOUR_BUCKET_NAME}/path/to/test-data",
        })

    """

    def __init__(
        self,
        experiment_id: str,
        name: str,
        artifact_uri: str,
        session: Optional[Session] = None,
    ):
        """Experiment constructor.

        Args:
            experiment_id (str): The UUID of the experiment, generated by the
                PAI service.
            name (str): The name of the experiment, unique within the
                workspace. The maximum length is 63 characters; it must start
                with an uppercase or lowercase letter or a number, and may
                include hyphens (-) and underscores (_).
            artifact_uri (str): An OSS URI which is the default base path used
                to store the output of jobs in the experiment, including model
                files and TensorBoard logs.
            session (Session, optional): A PAI session instance used for
                communicating with the PAI service.
        """
        self.session = session or get_default_session()
        self.experiment_id = experiment_id
        self.name = name
        self.artifact_uri = artifact_uri
        # Bug fix: use ``self.session`` (which falls back to the default
        # session) instead of the raw ``session`` argument — the argument may
        # be None, which previously raised AttributeError here.
        self._api_object = self.session.experiment_api.get(experiment_id)

    @classmethod
    def create(
        cls,
        artifact_uri: str,
        name: str,
        session: Optional[Session] = None,
    ) -> "Experiment":
        """Create an experiment.

        Args:
            artifact_uri (str): An OSS URI used to store the output of jobs in
                the experiment.
            name (str): The name of the experiment. The name must be unique
                within the workspace.
            session (Session, optional): The session to be used.

        Returns:
            Experiment: The created experiment.
        """
        session = session or get_default_session()
        experiment_id = session.experiment_api.create(
            artifact_uri=artifact_uri, name=name, workspace_id=session.workspace_id
        )
        return Experiment(
            experiment_id=experiment_id,
            name=name,
            artifact_uri=artifact_uri,
            session=session,
        )

    @classmethod
    def get(cls, experiment_id: str) -> "Experiment":
        """Get an existing experiment by ID, using the default session."""
        session = get_default_session()
        experiment = session.experiment_api.get(experiment_id)
        return Experiment(
            experiment_id=experiment_id,
            name=experiment["Name"],
            artifact_uri=experiment["ArtifactUri"],
            session=session,
        )

    @classmethod
    def get_by_name(
        cls, name: str, session: Optional[Session] = None
    ) -> Optional["Experiment"]:
        """Get an experiment by name.

        Args:
            name (str): The name of the experiment.
            session (Session, optional): The session to be used.

        Returns:
            Experiment, optional: The experiment whose name matches exactly,
                or None if no such experiment exists.
        """
        return next(
            (exp for exp in cls.list(name=name, session=session) if exp.name == name),
            None,
        )

    def update(self, name: str) -> "Experiment":
        """Update the experiment name.

        Args:
            name (str): New experiment name.

        Returns:
            Experiment: The updated experiment (self).
        """
        self.session.experiment_api.update(self.experiment_id, name=name)
        self.name = name
        return self

    def delete(self):
        """Delete the experiment."""
        self.session.experiment_api.delete(self.experiment_id)

    @classmethod
    def list(
        cls,
        name: Optional[str] = None,
        session: Optional[Session] = None,
    ) -> Iterator["Experiment"]:
        """List experiments.

        Args:
            name (str, optional): Filter experiments by name.
            session (Session, optional): The session to be used.

        Returns:
            Iterator[Experiment]: An iterator over matching experiments.
        """
        session = session or get_default_session()
        page_size, page_number = 50, 1
        while True:
            items = session.experiment_api.list(
                name=name,
                page_size=page_size,
                page_number=page_number,
            ).items
            if not items:
                break
            for item in items:
                yield cls(
                    session=session,
                    experiment_id=item["ExperimentId"],
                    name=item["Name"],
                    artifact_uri=item["ArtifactUri"],
                )
            page_number += 1

    def tensorboard_data(self) -> str:
        """Return the OSS URI of the TensorBoard logs of the experiment."""
        return self._api_object.get("TensorboardLogUri")

    def tensorboard(self, wait=True) -> "TensorBoard":
        """Launch a TensorBoard instance reading the experiment's log path.

        Reuses an existing TensorBoard bound to this experiment when one
        exists; otherwise creates a new one.

        Args:
            wait (bool): Whether to wait until the TensorBoard is ready.

        Returns:
            TensorBoard: The running TensorBoard instance.
        """
        source_type = "experiment"
        res = self.session.tensorboard_api.list(
            source_type=source_type,
            source_id=self.experiment_id,
        )
        if res.items:
            if len(res.items) > 1:
                logger.warning(
                    "Found multiple TensorBoard instances for the experiment, use the first one."
                )
            tb_id = res.items[0]["TensorboardId"]
            tb = TensorBoard(tb_id, session=self.session)
            tb.start(wait=wait)
        else:
            tb = TensorBoard.create(
                uri=self.tensorboard_data(),
                wait=wait,
                display_name=self.name + "_TensorBoard",
                source_type=source_type,
                source_id=self.experiment_id,
                session=self.session,
            )
        webbrowser.open(tb.app_uri)
        return tb
-import logging -import re from typing import Any, Dict, List, Optional, Union +from ..common.logging import get_logger from ..common.utils import to_semantic_version from ..image import ImageLabel -from ..model import ( +from ..model._model import ( DefaultServiceConfig, ModelBase, ResourceConfig, + StorageConfigBase, container_serving_spec, ) from ..serializers import SerializerBase from ..session import Session, get_default_session -logger = logging.getLogger(__name__) - -_PAI_HF_IMAGE_TAG_PATTERN_INFERENCE = re.compile( - r"huggingface-inference:transformers-(\d.+)-(gpu|cpu)" -) +logger = get_logger(__name__) class HuggingFaceModel(ModelBase): @@ -81,6 +77,7 @@ def __init__( requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, + storage_configs: Optional[List[StorageConfigBase]] = None, session: Optional[Session] = None, ): """Initialize a HuggingFace Model. @@ -149,6 +146,9 @@ def __init__( health_check (Dict[str, Any], optional): The health check configuration. If it not set, A TCP readiness probe will be used to check the health of the Model server. + storage_configs (List[StorageConfigBase], optional): A list of storage configs + used to mount the storage to the container. The storage can be OSS, NFS, + SharedMemory, or NodeStorage, etc. session (:class:`pai.session.Session`, optional): A pai session object manages interactions with PAI REST API. 
@@ -175,11 +175,14 @@ def __init__( self.requirements = requirements self.requirements_path = requirements_path self.health_check = health_check + self.storage_configs = storage_configs super(HuggingFaceModel, self).__init__( model_data=self.model_data, session=session or get_default_session(), ) + # Check image_uri and transformers_version + self.serving_image_uri() def _validate_args(self, image_uri: str, transformers_version: str) -> None: """Check if image_uri or transformers_version arguments are specified.""" @@ -189,7 +192,7 @@ def _validate_args(self, image_uri: str, transformers_version: str) -> None: "Specify either transformers_version or image_uri." ) - def serving_image_uri(self, instance_type: str) -> str: + def serving_image_uri(self) -> str: """Return the Docker image to use for serving. The :meth:`pai.huggingface.model.HuggingFaceModel.deploy` method, that does the @@ -212,10 +215,13 @@ def serving_image_uri(self, instance_type: str) -> str: # Filter images by Transformers version if self.transformers_version == "latest": latest_version = self._get_latest_tf_version_for_inference() - name = f"huggingface-inference:transformers-{latest_version}-" + labels.append(ImageLabel.framework_version("Transformers", latest_version)) else: - name = f"huggingface-inference:transformers-{self.transformers_version}-" + labels.append( + ImageLabel.framework_version("Transformers", self.transformers_version) + ) + name = "huggingface-inference:" resp = self.session.image_api.list( name=name, labels=labels, @@ -241,25 +247,29 @@ def _get_supported_tf_versions_for_inference(self) -> List[str]: ImageLabel.EAS_LABEL, ImageLabel.PROVIDER_PAI_LABEL, ImageLabel.DEVICE_TYPE_GPU, + ImageLabel.framework_version("Transformers", "*"), ] - name = "huggingface-inference:transformers-" + name = "huggingface-inference:" list_images = self.session.image_api.list( name=name, labels=labels, + verbose=True, workspace_id=0, ).items res = [] for image in list_images: - tag_match = 
_PAI_HF_IMAGE_TAG_PATTERN_INFERENCE.match(image["Name"]) - transformer_version, _ = tag_match.groups() - if transformer_version not in res: - res.append(transformer_version) - + for label in image["Labels"]: + if ( + label["Key"] == "system.framework.Transformers" + and label["Value"] not in res + ): + res.append(label["Value"]) + res.sort(key=lambda x: to_semantic_version(x)) return res def _get_latest_tf_version_for_inference(self) -> str: - """Return the latest Transformers version for inference.""" + """Return the latest transformers version for inference.""" res = self._get_supported_tf_versions_for_inference() return max( res, @@ -327,7 +337,7 @@ def deploy( :class:`pai.predictor.Predictor` : A PAI ``Predictor`` instance used for making prediction to the prediction service. """ - image_uri = self.serving_image_uri(instance_type=instance_type) + image_uri = self.serving_image_uri() self.inference_spec = container_serving_spec( command=self.command, image_uri=image_uri, @@ -338,6 +348,7 @@ def deploy( requirements=self.requirements, requirements_path=self.requirements_path, health_check=self.health_check, + storage_configs=self.storage_configs, session=self.session, ) diff --git a/pai/image.py b/pai/image.py index 21de961..6644b03 100644 --- a/pai/image.py +++ b/pai/image.py @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging import re from typing import Any, Dict, List, Optional from .api.image import SUPPORTED_IMAGE_FRAMEWORKS, ImageLabel +from .common.logging import get_logger from .common.utils import make_list_resource_iterator, to_semantic_version from .session import Session, get_default_session -logger = logging.getLogger(__name__) +logger = get_logger(__name__) _NORMALIZED_FRAMEWORK_NAMES = { diff --git a/pai/job/__init__.py b/pai/job/__init__.py new file mode 100644 index 0000000..239b3ff --- /dev/null +++ b/pai/job/__init__.py @@ -0,0 +1,54 @@ +# Copyright 2024 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ._local_training_job import LocalTrainingJob +from ._training_job import ( + AlgorithmSpec, + Channel, + CodeDir, + ExperimentConfig, + HyperParameterDefinition, + InstanceSpec, + ModelRecipeSpec, + OssLocation, + ResourceType, + SpotSpec, + SpotStrategy, + TrainingJob, + TrainingJobStatus, + UriInput, + UriOutput, + UserVpcConfig, + _TrainingJobSubmitter, +) + +__all__ = [ + "TrainingJob", + "ModelRecipeSpec", + "TrainingJobStatus", + "Channel", + "HyperParameterDefinition", + "OssLocation", + "AlgorithmSpec", + "CodeDir", + "LocalTrainingJob", + "UriOutput", + "UserVpcConfig", + "ExperimentConfig", + "InstanceSpec", + "UriInput", + "SpotSpec", + "ResourceType", + "SpotStrategy", +] diff --git a/pai/job/_local_training_job.py b/pai/job/_local_training_job.py new file mode 100644 index 0000000..5f9c1d7 --- /dev/null +++ b/pai/job/_local_training_job.py @@ -0,0 +1,293 @@ +# Copyright 2024 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

import distutils.dir_util
import json
import os
import posixpath
import re
import shlex
import shutil
import tempfile
import textwrap
import typing
from typing import Any, Dict, Optional

from pai.session import Session

from ..common.consts import INSTANCE_TYPE_LOCAL_GPU
from ..common.docker_utils import ContainerRun, run_container
from ..common.logging import get_logger
from ..common.oss_utils import OssUriObj, download, is_oss_uri

if typing.TYPE_CHECKING:
    from ..estimator import Estimator


logger = get_logger(__name__)


class _TrainingEnv(object):
    # Names of the PAI_* environment variables injected into the container.
    ENV_PAI_HPS = "PAI_HPS"
    ENV_PAI_HPS_PREFIX = "PAI_HPS_"
    ENV_PAI_USER_ARGS = "PAI_USER_ARGS"
    ENV_PAI_INPUT_PREFIX = "PAI_INPUT_"
    ENV_PAI_OUTPUT_PREFIX = "PAI_OUTPUT_"
    ENV_PAI_WORKING_DIR = "PAI_WORKING_DIR"


class _TrainingJobConfig(object):
    # In-container layout used by the training job.
    WORKING_DIR = "/ml/usercode/"
    INPUT_CONFIG_DIR = "/ml/input/config/"
    INPUT_DATA_DIR = "/ml/input/data/"
    OUTPUT_DIR = "/ml/output/"


# Characters not allowed in environment variable names.
_ENV_NOT_ALLOWED_CHARS = re.compile(r"[^a-zA-Z0-9_]")

# Launch script: cd to the working dir, install requirements.txt if present,
# then execute the user command file (substituted for {0}).
_TRAINING_LAUNCH_SCRIPT_TEMPLATE = textwrap.dedent(
    """\
#!/bin/sh

env

# change to working directory
if [ -n "$PAI_WORKING_DIR" ]; then
    echo "Change to Working Directory", $PAI_WORKING_DIR
    mkdir -p $PAI_WORKING_DIR && cd $PAI_WORKING_DIR
fi

# install requirements
if [ -e "requirements.txt" ]; then
    echo "Installing dependencies from requirements.txt"
    python -m pip install -r requirements.txt
fi

echo "User program launching"
echo "-----------------------------------------------------------------"

sh {0}
"""
)


class LocalTrainingJob(object):
    """A class that represents a local training job running with docker container."""

    def __init__(
        self,
        estimator: "Estimator",
        inputs: Dict[str, Any],
        instance_type: Optional[str] = None,
        temp_dir: Optional[str] = None,
        job_name: Optional[str] = None,
    ):
        """LocalTrainingJob constructor.

        Args:
            estimator (Estimator): The estimator whose job is run locally.
            inputs (Dict[str, Any]): Input channel name to data source (local
                path or OSS URI).
            instance_type (str, optional): Local instance type; GPU is enabled
                when it equals ``INSTANCE_TYPE_LOCAL_GPU``.
            temp_dir (str, optional): Working directory on the host; a fresh
                temporary directory is created if not given.
            job_name (str, optional): Name of the job.
        """
        self.estimator = estimator
        self.inputs = inputs
        self.tmp_dir = temp_dir or tempfile.mkdtemp()
        self.job_name = job_name
        self.instance_type = instance_type
        logger.info("Local TrainingJob temporary directory: {}".format(self.tmp_dir))
        self._container_run: Optional[ContainerRun] = None

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        if self._container_run:
            container = self._container_run.container
            container_name, container_id, status = (
                container.name,
                container.id,
                container.status,
            )
        else:
            container_name, container_id, status = None, None, None
        return f"LocalTrainingJob(container_name={container_name}, container_id={container_id}, status={status})"

    @property
    def session(self) -> Session:
        # The job reuses the session of its estimator.
        return self.estimator.session

    def prepare_env(self) -> Dict[str, str]:
        """Prepare environment variables for the training job.

        Returns:
            Dict[str, str]: PAI_* variables covering hyperparameters, input
                channels, output channels and the working directory.
        """

        def _normalize_name(name: str) -> str:
            # Replace all non-alphanumeric characters with underscore.
            return _ENV_NOT_ALLOWED_CHARS.sub("_", name).upper()

        env = {}
        user_args = []
        # Hyperparameters environment variables.
        for name, value in self.estimator.hyperparameters.items():
            env[_TrainingEnv.ENV_PAI_HPS_PREFIX + _normalize_name(name)] = str(value)
            user_args.extend(["--" + name, shlex.quote(str(value))])
        env[_TrainingEnv.ENV_PAI_USER_ARGS] = " ".join(
            [shlex.quote(v) for v in user_args]
        )
        env[_TrainingEnv.ENV_PAI_HPS] = json.dumps(
            {name: str(value) for name, value in self.estimator.hyperparameters.items()}
        )

        # Environments for input channels: a directory-like input is exposed
        # as the channel directory, a single file as a file inside it.
        for name, value in self.inputs.items():
            if (is_oss_uri(value) and value.endswith("/")) or os.path.isdir(value):
                env[
                    _TrainingEnv.ENV_PAI_INPUT_PREFIX + _normalize_name(name)
                ] = posixpath.join(_TrainingJobConfig.INPUT_DATA_DIR, name)
            else:
                file_name = os.path.basename(value)
                env[
                    _TrainingEnv.ENV_PAI_INPUT_PREFIX + _normalize_name(name)
                ] = posixpath.join(_TrainingJobConfig.INPUT_DATA_DIR, name, file_name)

        # Environments for output channels. By default, a TrainingJob invoked
        # by Estimator has two output channels: 'model' and 'checkpoints'.
        output_channel = ["model", "checkpoints"]
        for name in output_channel:
            env[
                _TrainingEnv.ENV_PAI_OUTPUT_PREFIX + _normalize_name(name)
            ] = posixpath.join(_TrainingJobConfig.OUTPUT_DIR, name)

        env[_TrainingEnv.ENV_PAI_WORKING_DIR] = _TrainingJobConfig.WORKING_DIR
        return env

    def run(self):
        """Run estimator job in local with docker."""
        output_model_path = self.output_path()
        os.makedirs(output_model_path, exist_ok=True)
        volumes = {}

        tmp_dir = tempfile.mkdtemp()
        # 1. Prepare source code, mounted to /ml/usercode.
        user_code_dir = os.path.join(self.tmp_dir, "user_code")
        if is_oss_uri(self.estimator.source_dir):
            raise RuntimeError("OSS source code is not supported in local training.")
        shutil.copytree(self.estimator.source_dir, user_code_dir)
        volumes[user_code_dir] = {
            "bind": _TrainingJobConfig.WORKING_DIR,
            "mode": "rw",
        }

        # 2. Prepare input data for the training job.
        input_data = self.prepare_input_data()
        for host_path, container_path in input_data.items():
            volumes[host_path] = {
                "bind": container_path,
                "mode": "rw",
            }

        # 3. Prepare input config files (hyperparameters.json, launch.sh, ...).
        input_config_path = os.path.join(tmp_dir, "config")
        os.makedirs(input_config_path, exist_ok=True)
        self.prepare_input_config(input_config_path=input_config_path)
        volumes[input_config_path] = {
            "bind": _TrainingJobConfig.INPUT_CONFIG_DIR,
            "mode": "rw",
        }

        execution_dir = os.path.join(tmp_dir, "config", "execution")
        os.makedirs(execution_dir, exist_ok=True)
        command_path = os.path.join(execution_dir, "command.sh")
        with open(command_path, "w") as f:
            f.write(self.estimator.command)
        launch_script_path = os.path.join(input_config_path, "launch.sh")
        with open(launch_script_path, "w") as f:
            f.write(
                _TRAINING_LAUNCH_SCRIPT_TEMPLATE.format(
                    posixpath.join(
                        _TrainingJobConfig.INPUT_CONFIG_DIR, "execution/command.sh"
                    )
                )
            )

        # 4. Config output model channel.
        volumes[output_model_path] = {
            "bind": posixpath.join(_TrainingJobConfig.OUTPUT_DIR, "model"),
            "mode": "rw",
        }

        # Bug fix: instance_type defaults to None; guard before calling
        # .strip() to avoid an AttributeError when it was not provided.
        gpu_count = (
            -1
            if self.instance_type
            and self.instance_type.strip() == INSTANCE_TYPE_LOCAL_GPU
            else None
        )
        self._container_run = run_container(
            environment_variables=self.prepare_env(),
            image_uri=self.estimator.image_uri,
            entry_point=[
                "/bin/sh",
                posixpath.join(_TrainingJobConfig.INPUT_CONFIG_DIR, "launch.sh"),
            ],
            volumes=volumes,
            working_dir=_TrainingJobConfig.WORKING_DIR,
            gpu_count=gpu_count,
        )

    def prepare_input_config(self, input_config_path):
        """Prepare input config for TrainingJob, such as hyperparameters.json,
        trainingjob.json."""
        with open(os.path.join(input_config_path, "hyperparameters.json"), "w") as f:
            hps = self.estimator.hyperparameters or dict()
            f.write(json.dumps({k: str(v) for k, v in hps.items()}))

    def prepare_input_data(self) -> Dict[str, str]:
        """Stage input channel data under the job temporary directory.

        OSS inputs are downloaded; local inputs are copied. Raises ValueError
        when a local input path does not exist.

        Returns:
            Dict[str, str]: Mapping from host path to container mount path.
        """
        input_data_configs = {}
        for name, input_data in self.inputs.items():
            local_channel_path = os.path.join(self.tmp_dir, f"input/data/{name}")
            os.makedirs(local_channel_path, exist_ok=True)
            input_data_configs[local_channel_path] = posixpath.join(
                _TrainingJobConfig.INPUT_DATA_DIR, name
            )
            if is_oss_uri(input_data):
                oss_uri_obj = OssUriObj(input_data)
                oss_bucket = self.session.get_oss_bucket(oss_uri_obj.bucket_name)
                download(
                    oss_uri_obj.object_key,
                    local_path=local_channel_path,
                    bucket=oss_bucket,
                )
            elif not os.path.exists(input_data):
                raise ValueError(
                    "Input data not exists: name={} input_data={}".format(
                        name, input_data
                    )
                )
            elif os.path.isdir(input_data):
                distutils.dir_util.copy_tree(input_data, local_channel_path)
            else:
                shutil.copy(
                    input_data,
                    os.path.join(local_channel_path, os.path.basename(input_data)),
                )
        return input_data_configs

    def wait(self, show_logs: bool = True):
        """Block until the container exits, optionally streaming its logs."""
        self._container_run.watch(show_logs=show_logs)

    def output_path(self, channel_name="model"):
        """Return the host path of the given output channel."""
        return os.path.join(self.tmp_dir, "output", f"{channel_name}/")

    def is_succeeded(self):
        """Return True if the training job is succeeded, otherwise return False."""
        return self._container_run.is_succeeded()
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import posixpath +import time +import typing +from concurrent.futures import ThreadPoolExecutor +from enum import Enum +from typing import Any, Dict, List, Optional, Union + +from pydantic import BaseModel, ConfigDict, Field +from pydantic.alias_generators import to_pascal +from Tea.exceptions import TeaException + +from ..api.base import PaginatedResult +from ..common.consts import StoragePathCategory +from ..common.logging import get_logger +from ..common.oss_utils import OssUriObj, is_oss_uri, upload +from ..common.utils import ( + is_dataset_id, + is_filesystem_uri, + is_odps_table_uri, + name_from_base, + print_table, + random_str, + retry, + to_plain_text, +) +from ..exception import UnexpectedStatusException +from ..session import Session, get_default_session + +if typing.TYPE_CHECKING: + from ..estimator import FileSystemInputBase + +logger = get_logger(__name__) + + +def as_oss_dir_uri(uri: str): + return uri if uri.endswith("/") else uri + "/" + + +DEFAULT_OUTPUT_MODEL_CHANNEL_NAME = "model" +DEFAULT_CHECKPOINT_CHANNEL_NAME = "checkpoints" +DEFAULT_TENSORBOARD_CHANNEL_NAME = "tensorboard" + + +class SpotStrategy(str, Enum): + SpotWithPriceLimit = "SpotWithPriceLimit" + SpotAsPriceGo = "SpotAsPriceGo" + + def __repr__(self): + return self.value + + +class ResourceType(str, Enum): + Lingjun = "Lingjun" + General = "General" + + +class BaseAPIModel(BaseModel): + + model_config = ConfigDict( + alias_generator=to_pascal, + populate_by_name=True, + ) + + def model_dump(self, **kwargs) -> Dict[str, Any]: + kwargs.update({"by_alias": True, "exclude_none": True}) + return super().model_dump(**kwargs) + + def to_dict(self): + return self.model_dump() + + +class TrainingJobStatus(object): + CreateFailed = "CreateFailed" + InitializeFailed = "InitializeFailed" + Succeed = "Succeed" + Failed = "Failed" + Terminated = "Terminated" + Creating = 
"Creating" + Created = "Created" + Initializing = "Initializing" + Submitted = "Submitted" + Running = "Running" + + @classmethod + def completed_status(cls): + return [ + cls.InitializeFailed, + cls.Succeed, + cls.Failed, + cls.Terminated, + ] + + @classmethod + def failed_status(cls): + return [ + cls.InitializeFailed, + cls.Failed, + cls.CreateFailed, + ] + + +class UserVpcConfig(BaseAPIModel): + """UserVpcConfig represents the VPC configuration for the training job instance.""" + + vpc_id: str = Field( + ..., + description="Specifies the ID of the VPC that training job instance connects to.", + ) + security_group_id: str = Field( + ..., + description="The ID of the security group that training job instances belong to.", + ) + switch_id: Optional[str] = Field( + None, + description="The ID of the vSwitch to which the instance belongs. Defaults to None.", + ) + extended_cidrs: Optional[List[str]] = Field( + None, + description="The CIDR blocks configured for the ENI of the training job instance. " + "If it is not specified, the CIDR block will be configured as the same as the VPC " + "network segmentation, which means that the training job instance can access all " + "resources in the VPC. Defaults to None.", + ) + + +class ExperimentConfig(BaseAPIModel): + """ExperimentConfig is used to configure the experiment to which the job belongs.""" + + experiment_id: str = Field( + ..., + description="Specifies the ID of the experiment that training job instance belongs to.", + ) + + +class OssLocation(BaseAPIModel): + """OSS location.""" + + bucket: str = Field(..., description="OSS bucket name.") + key: str = Field(..., description="Object key in the OSS bucket.") + endpoint: Optional[str] = Field(None, description="OSS service endpoint URL.") + + +class CodeDir(BaseAPIModel): + """Source code location""" + + location_value: Union[OssLocation, Dict[str, Any]] = Field( + ..., description="Location of the code directory." 
+ ) + location_type: str = Field( + ..., description="Type of the code directory location, e.g., OSS." + ) + + +# HyperParameter +class HyperParameter(BaseAPIModel): + """A hyperparameter for a training job.""" + + value: str = Field(..., description="Value of the hyperparameter.") + name: str = Field(..., description="Name of the hyperparameter.") + + +class InstanceSpec(BaseAPIModel): + """Instance resource configuration""" + + memory: str = Field(..., description="Memory allocation for the instance.") + cpu: str = Field(..., alias="CPU", description="CPU allocation for the instance.") + gpu: str = Field(..., alias="GPU", description="GPU allocation for the instance.") + shared_memory: Optional[str] = Field( + None, description="Shared memory allocation, if applicable." + ) + + +class ComputeResource(BaseAPIModel): + """Compute Resource Configuration.""" + + ecs_count: Optional[int] = Field(None, description="Number of ECS instances.") + ecs_spec: Optional[str] = Field(None, description="Specification of ECS instances.") + instance_count: Optional[int] = Field(None, description="Number of instances.") + instance_spec: Optional[InstanceSpec] = Field( + None, description="Specification for instances." + ) + + +# URI Input and Output +class UriInput(BaseAPIModel): + """URI Input for a training job.""" + + name: str = Field(..., description="Name of the input.") + input_uri: str = Field(..., description="URI of the input data.") + + +class UriOutput(BaseAPIModel): + """URI Output for a training job.""" + + name: str = Field(..., description="Name of the output.") + output_uri: str = Field(..., description="URI of the output data.") + + +class DatasetConfig(BaseAPIModel): + """Dataset Configuration""" + + dataset_id: str = Field(..., description="Unique ID of the dataset.") + name: Optional[str] = Field(None, description="Name of the dataset.") + dataset_name: Optional[str] = Field( + None, description="Alternative name of the dataset." 
+ ) + + +class Channel(BaseAPIModel): + """Channel Configuration.""" + + name: str = Field(..., description="Name of the channel.") + description: Optional[str] = Field(None, description="Description of the channel.") + required: Optional[bool] = Field( + None, description="Indicates if the channel is required." + ) + supported_channel_types: Optional[List[str]] = Field( + None, description="Supported types for this channel." + ) + properties: Optional[Dict[str, Any]] = Field( + None, description="Additional properties of the channel." + ) + + +# HyperParameter Definition +class HyperParameterDefinition(BaseAPIModel): + """HyerParameter Definition.""" + + name: str = Field(..., description="Name of the hyperparameter.") + type: Optional[str] = Field(None, description="Type of the hyperparameter.") + default_value: Optional[str] = Field( + None, description="Default value of the hyperparameter." + ) + description: Optional[str] = Field( + None, description="Description of the hyperparameter." + ) + required: bool = Field( + False, description="Indicates if the hyperparameter is required." + ) + + +class SchedulerConfig(BaseAPIModel): + max_running_time_in_seconds: Optional[int] = None + + +class MetricDefinition(BaseAPIModel): + description: Optional[str] = Field(None, description="Description of the metric.") + name: str = Field(..., description="Name of the metric.") + regex: str = Field( + ..., description="Regular expression used for capturing the metric." + ) + + +class AlgorithmSpec(BaseAPIModel): + """Algorithm Specification.""" + + command: List[str] = Field(..., description="Command to run the training job.") + image: str = Field(..., description="Docker image for the training job.") + supported_channel_types: List[str] = Field(default_factory=list) + output_channels: List[Channel] = Field( + default_factory=list, description="Output channels." + ) + input_channels: List[Channel] = Field( + default_factory=list, description="Input channels." 
+ ) + supports_distributed_training: Optional[bool] = Field( + True, description="Whether the algorithm supports distributed training." + ) + supported_instance_types: Optional[List[str]] = Field( + None, description="Supported instance types." + ) + metric_definitions: Optional[List[MetricDefinition]] = Field( + None, description="Metric definitions." + ) + hyperparameter_definitions: List[HyperParameterDefinition] = Field( + default_factory=list, + alias="HyperParameters", + description="Hyperparameter definitions.", + ) + job_type: str = Field(default="PyTorchJob") + code_dir: Optional[CodeDir] = Field(None, description="Source code location.") + customization: Optional[Dict[str, Any]] = Field( + None, description="Whether the algorithm supports customize code." + ) + + +class ModelRecipeSpec(BaseAPIModel): + compute_resource: Optional[ComputeResource] = None + hyperparameters: List[HyperParameter] = Field( + default_factory=list, alias="HyperParameters" + ) + inputs: List[Union[UriInput, DatasetConfig]] = Field( + default_factory=list, alias="InputChannels" + ) + scheduler: Optional[SchedulerConfig] = None + supported_instance_types: Optional[List[str]] = None + algorithm_spec: Optional[AlgorithmSpec] = None + algorithm_version: Optional[str] = None + algorithm_provider: Optional[str] = None + algorithm_name: Optional[str] = None + environments: Optional[Dict[str, str]] = None + requirements: Optional[List[str]] = None + + +class SpotSpec(BaseAPIModel): + spot_strategy: SpotStrategy = Field( + ..., + description="Spot instance strategy, support 'SpotWithPriceLimit', 'SpotAsPriceGo'", + ) + spot_discount_limit: Optional[float] = Field( + None, + description="Spot instance discount limit, maximum 2 decimal places, " + "required when spot_strategy is 'SpotWithPriceLimit'." 
+ "For example, 0.5 means 50% off the original price.", + ) + + +class TrainingJob(BaseAPIModel): + """TrainingJob represents a training job in the PAI service.""" + + algorithm_id: Optional[str] = None + algorithm_name: Optional[str] = None + algorithm_provider: Optional[str] = None + algorithm_version: Optional[str] = None + algorithm_spec: Optional[AlgorithmSpec] = None + compute_resource: Optional[ComputeResource] = None + scheduler: Optional[SchedulerConfig] = None + experiment_config: Optional[Dict[str, Any]] = None + inputs: List[Union[UriInput, DatasetConfig]] = Field( + default=list, alias="InputChannels" + ) + outputs: List[Union[UriOutput, DatasetConfig]] = Field( + default=list, alias="OutputChannels" + ) + hyperparameters: List[HyperParameter] = Field( + default_factory=list, alias="HyperParameters" + ) + labels: Optional[List[Dict[str, str]]] = Field(default_factory=list) + training_job_description: Optional[str] = None + training_job_id: Optional[str] = None + training_job_name: Optional[str] = None + workspace_id: Optional[str] = None + training_job_url: Optional[str] = None + status: Optional[str] = None + reason_code: Optional[str] = None + reason_message: Optional[str] = None + + def __hash__(self): + return hash(self.training_job_id) + + def __eq__(self, other: "TrainingJob"): + return ( + isinstance(other, TrainingJob) + and self.training_job_id == other.training_job_id + ) + + @property + def id(self): + return self.training_job_id + + @classmethod + def get(cls, training_job_id, session: Session = None) -> "TrainingJob": + session = session or get_default_session() + res = session.training_job_api.get(training_job_id=training_job_id) + return cls.model_validate(res) + + @classmethod + def list( + cls, + status: Optional[str] = None, + session: Optional[Session] = None, + page_size: int = 50, + page_number: int = 1, + ): + session = session or get_default_session() + res = session.training_job_api.list( + status=status, page_size=page_size, 
page_number=page_number + ) + return [cls.model_validate(item) for item in res.items] + + def output_path(self, channel_name="model"): + for output_channel in self.outputs: + if output_channel.name == channel_name: + return output_channel.output_uri + raise RuntimeError( + f"Output channel is not specified: channel_name={channel_name}" + ) + + @property + def console_uri(self): + if not self.training_job_id: + raise ValueError("The TrainingJob is not submitted") + + return self.training_job_url + + def wait(self, interval: int = 5, show_logs: bool = True): + session = get_default_session() + self._refresh_status() + + if show_logs: + job_log_printer = _TrainingJobLogPrinter( + training_job_id=self.training_job_id, page_size=20, session=session + ) + job_log_printer.start() + else: + job_log_printer = None + try: + while not self.is_completed(): + time.sleep(interval) + finally: + if job_log_printer: + job_log_printer.stop(wait=True) + + self._on_job_completed() + + def _on_job_completed(self): + # Print an empty line to separate the training job logs and the following logs + print() + if self.status == TrainingJobStatus.Succeed: + print( + f"Training job ({self.training_job_id}) succeeded, you can check the" + f" logs/metrics/output in the console:\n{self.console_uri}" + ) + elif self.status == TrainingJobStatus.Terminated: + print( + f"Training job is ended with status {self.status}: " + f"reason_code={self.reason_code}, reason_message={self.reason_message}." 
+ f"Check the training job in the console:\n{self.console_uri}" + ) + elif self.status in TrainingJobStatus.failed_status(): + print( + f"Training job ({self.training_job_id}) failed, please check the logs" + f" in the console: \n{self.console_uri}" + ) + + message = f"TrainingJob failed: name={self.training_job_name}, " + f"training_job_id={self.training_job_id}, " + f"reason_code={self.reason_code}, status={self.status}, " + f"reason_message={self.reason_message}" + + raise UnexpectedStatusException(message=message, status=self.status) + + def _refresh_status(self): + """Reload the training job from the PAI Service,""" + session = get_default_session() + training_job = type(self).model_validate( + session.training_job_api.get(training_job_id=self.training_job_id) + ) + self.status = training_job.status + + def is_succeeded(self): + """Return True if the training job is succeeded""" + self._refresh_status() + return self.status == TrainingJobStatus.Succeed + + @retry(wait_secs=10) + def is_completed(self): + """Return True if the training job is completed, including failed status""" + if self.status in TrainingJobStatus.completed_status(): + return True + self._refresh_status() + + return self.status in TrainingJobStatus.completed_status() + + +class _TrainingJobLogPrinter(object): + """A class used to print logs for a training job""" + + executor = ThreadPoolExecutor(5) + + def __init__( + self, training_job_id: str, page_size=10, session: Optional[Session] = None + ): + self.training_job_id = training_job_id + self.session = session + self.page_size = page_size + self._future = None + self._stop = False + + def _list_logs_api(self, page_number: int = 1): + try: + res = self.session.training_job_api.list_logs( + self.training_job_id, + page_number=page_number, + page_size=self.page_size, + ) + return res + except TeaException as e: + # hack: Backend service may raise an exception when the training job + # instance is not found. 
+ if e.code == "TRAINING_JOB_INSTANCE_NOT_FOUND": + return PaginatedResult(items=[], total_count=0) + else: + raise e + + def _list_logs(self): + page_number, page_offset = 1, 0 + # print training job logs. + while not self._stop: + res = self._list_logs_api(page_number=page_number) + # 1. move to next page + if len(res.items) == self.page_size: + # print new logs starting from page_offset + self._print_logs(logs=res.items[page_offset:]) + page_number += 1 + page_offset = 0 + # 2. stay at the current page. + else: + if len(res.items) > page_offset: + # print new logs starting from page_offset + self._print_logs(logs=res.items[page_offset:]) + page_offset = len(res.items) + time.sleep(1) + + # When _stop is True, wait and print remaining logs. + time.sleep(10) + while True: + res = self._list_logs_api(page_number=page_number) + # There maybe more logs in the next page + if len(res.items) == self.page_size: + self._print_logs(logs=res.items[page_offset:]) + page_number += 1 + page_offset = 0 + # No more logs in the next page. 
+ else: + if len(res.items) > page_offset: + self._print_logs(logs=res.items[page_offset:]) + break + + def _print_logs(self, logs: List[str]): + for log in logs: + print(log) + + def start(self): + if self._future: + raise ValueError("The training job log printer is already started") + self._stop = False + self._future = self.executor.submit(self._list_logs) + + def stop(self, wait: bool = True): + self._stop = True + if self._future: + self._future.result() + + +class _TrainingJobSubmitter(object): + """A class used to submit a training job to the PAI service.""" + + def __init__( + self, + base_job_name: Optional[str] = None, + output_path: Optional[str] = None, + experiment_config: Optional[ExperimentConfig] = None, + user_vpc_config: Optional[UserVpcConfig] = None, + max_run_time: Optional[int] = None, + instance_type: Optional[str] = None, + instance_spec: Optional[Dict] = None, + instance_count: Optional[int] = None, + resource_id: Optional[Dict] = None, + resource_type: Optional[Union[str, ResourceType]] = None, + spot_spec: Optional[SpotSpec] = None, + environments: Optional[Dict] = None, + requirements: Optional[List[str]] = None, + labels: Optional[Dict[str, str]] = None, + settings: Optional[Dict[str, Any]] = None, + ): + self.session = get_default_session() + self._training_jobs = [] + self.base_job_name = base_job_name or type(self).__name__.lower() + self.output_path = output_path + self.user_vpc_config = user_vpc_config + self.spot_spec = spot_spec + self.experiment_config = experiment_config + self.max_run_time = max_run_time + self.instance_type = instance_type + self.instance_spec = instance_spec + self.instance_count = instance_count or 1 + self.resource_id = resource_id + self.resource_type = ResourceType(resource_type) if resource_type else None + self.environments = environments + self.requirements = requirements + self.settings = settings + self.labels = labels + + def wait(self, interval: int = 5, show_logs: bool = True, all_jobs: bool = 
False): + """Block until the jobs is completed. + + Args: + interval(int): Interval to reload job status + show_logs(bool): Specifies whether to fetch and print the logs produced by + the job. + all_jobs(bool): Wait latest job or wait all jobs in processor, show_logs disabled while + wait all jobs. + + Raises: + RuntimeError: If no job is submitted. + + """ + if all_jobs: + if not self._training_jobs: + raise RuntimeError("Could not find any submitted job.") + remains = set(self._training_jobs) + while remains: + for job in self._training_jobs: + if job in remains and job.is_completed(): + remains.remove(job) + + time.sleep(interval) + self._generate_jobs_report() + else: + latest_job = self.latest_job + if not latest_job: + raise RuntimeError("Could not find a submitted job.") + latest_job.wait(interval=interval, show_logs=show_logs) + return latest_job + + def _generate_jobs_report(self): + """Generate current jobs report and output to stdout""" + print(f"Jobs status report, total jobs count: {len(self._training_jobs)}") + rows = [] + headers = ["JobName", "JobID", "Status"] + for job in self._training_jobs: + rows.append([job.training_job_name, job.id, job.status]) + print_table(headers, rows) + + def job_name(self, job_name: Optional[str] = None): + if job_name: + return job_name + sep = "-" + base_name = self.base_job_name + return name_from_base(base_name, sep) + + def build_inputs( + self, + inputs: Dict[str, Any], + input_channels: List[Channel], + default_inputs: Optional[Dict[str, Any]] = None, + ) -> List[Dict[str, str]]: + res = [] + inputs = inputs or dict() + input_channels = input_channels or [] + default_inputs = default_inputs or {} + + inputs = {**default_inputs, **inputs} + requires = {ch.name for ch in input_channels if ch.required} - set( + inputs.keys() + ) + if requires: + raise ValueError( + "Required input channels are not provided: {}".format( + ",".join(requires) + ) + ) + for name, item in inputs.items(): + input_config = 
self._get_input_config(name, item) + res.append(input_config.model_dump()) + + return res + + @staticmethod + def _default_training_output_channels() -> List[Channel]: + channels = [ + Channel( + name=DEFAULT_OUTPUT_MODEL_CHANNEL_NAME, + description="Training output models", + required=True, + ), + Channel( + name=DEFAULT_CHECKPOINT_CHANNEL_NAME, + description="Training checkpoints channel", + required=False, + ), + Channel( + name=DEFAULT_TENSORBOARD_CHANNEL_NAME, + properties={"ossAppendable": "true"}, + description="TensorBoard logs channel", + required=False, + ), + ] + + return channels + + def _training_job_base_output(self, job_name): + job_name = to_plain_text(job_name) + if self.output_path: + if not is_oss_uri(self.output_path): + raise ValueError("Output path should be an OSS path.") + return os.path.join(self.output_path, f"{job_name}_{random_str(6)}") + + session = get_default_session() + bucket_name = session.oss_bucket.bucket_name + storage_path = session.get_storage_path_by_category( + StoragePathCategory.TrainingJob, + f"{to_plain_text(job_name)}_{random_str(6)}", + ) + base_output_path = f"oss://{bucket_name}/{storage_path}" + return base_output_path + + def build_outputs( + self, + job_name: str, + output_channels: List[Channel], + outputs: Optional[Dict[str, Any]] = None, + ) -> List[Dict[str, str]]: + base_output_path = self._training_job_base_output(job_name) + res = [] + outputs = outputs or dict() + + for ch in output_channels: + if ch.name in outputs: + output = self._get_output_config(name=ch.name, item=outputs[ch.name]) + else: + output_uri = as_oss_dir_uri(posixpath.join(base_output_path, ch.name)) + output = UriOutput(name=ch.name, output_uri=output_uri) + res.append(output) + + extra_outputs = set(outputs.keys()) - {ch.name for ch in output_channels} + + for name in extra_outputs: + output = self._get_output_config( + name=name, + item=outputs[name], + ) + res.append(output) + + return [item.model_dump() for item in res] + + # TODO: 
get arguments, such as VPCConfig, instance_type etc, from self instance. + def _submit( + self, + job_name: str, + algorithm_spec: Optional[AlgorithmSpec] = None, + algorithm_name: Optional[str] = None, + algorithm_version: Optional[str] = None, + algorithm_provider: Optional[str] = None, + instance_count: int = 1, + instance_type: Optional[str] = None, + instance_spec: Optional[InstanceSpec] = None, + resource_id: Optional[str] = None, + inputs: Optional[List[Dict[str, Any]]] = None, + outputs: Optional[List[Dict[str, Any]]] = None, + hyperparameters: Optional[Dict[str, str]] = None, + max_run_time: Optional[int] = None, + environments: Optional[Dict[str, str]] = None, + user_vpc_config: Optional[Dict[str, str]] = None, + requirements: Optional[List[str]] = None, + experiment_config: Optional[Dict[str, Any]] = None, + labels: Optional[Dict[str, str]] = None, + wait: bool = True, + show_logs: bool = False, + ): + session = get_default_session() + + if not self.resource_type or self.resource_type == ResourceType.General: + resource_type = None + else: + resource_type = self.resource_type.value + + if self.spot_spec: + spot_spec = { + "SpotStrategy": self.spot_spec.spot_strategy.value, + } + if self.spot_spec.spot_discount_limit: + spot_spec["SpotDiscountLimit"] = self.spot_spec.spot_discount_limit + else: + spot_spec = None + + # user vpc + if self.user_vpc_config: + user_vpc_config = { + "VpcId": self.user_vpc_config.vpc_id, + "SecurityGroupId": self.user_vpc_config.security_group_id, + } + else: + user_vpc_config = None + + training_job_id = session.training_job_api.create( + instance_count=instance_count, + instance_spec=instance_spec.model_dump() if instance_spec else None, + algorithm_name=algorithm_name, + algorithm_provider=algorithm_provider, + experiment_config=( + experiment_config.model_dump() + if experiment_config and isinstance(experiment_config, ExperimentConfig) + else experiment_config + ), + spot_spec=spot_spec, + 
algorithm_version=algorithm_version, + instance_type=instance_type, + resource_id=resource_id, + resource_type=resource_type, + job_name=job_name, + hyperparameters=hyperparameters, + max_running_in_seconds=max_run_time, + input_channels=inputs, + output_channels=outputs, + algorithm_spec=algorithm_spec.model_dump() if algorithm_spec else None, + requirements=requirements, + user_vpc_config=user_vpc_config, + labels=labels, + environments=environments, + settings=self.settings, + ) + training_job = TrainingJob.get(training_job_id) + self._training_jobs.append(training_job) + print( + f"View the job detail by accessing the console URI: {training_job.console_uri}" + ) + if wait: + training_job.wait(show_logs=show_logs) + return training_job + + @classmethod + def _get_input_config( + cls, name: str, item: Union[str, "FileSystemInputBase", DatasetConfig] + ) -> Union[UriInput, DatasetConfig]: + """Get input uri for training_job from given input.""" + from pai.estimator import FileSystemInputBase + + if not isinstance(item, (str, FileSystemInputBase, DatasetConfig)): + raise ValueError(f"Input data of type {type(item)} is not supported.") + + if isinstance(item, FileSystemInputBase): + input_ = UriInput( + name=name, + input_uri=item.to_input_uri(), + ) + elif isinstance(item, DatasetConfig): + input_ = DatasetConfig( + name=name, + dataset_id=item.dataset_id, + ) + elif is_oss_uri(item) or is_filesystem_uri(item) or is_odps_table_uri(item): + input_ = UriInput( + name=name, + input_uri=item, + ) + elif isinstance(item, str): + if os.path.exists(item): + store_path = Session.get_storage_path_by_category( + StoragePathCategory.InputData + ) + input_ = UriInput(name=name, input_uri=upload(item, store_path)) + else: + raise ValueError("Invalid input data path, file not found: {item}.") + else: + raise ValueError( + f"Invalid input data, supported inputs are OSS, NAS, MaxCompute " + f"table or local path: {type(item)}." 
+ ) + return input_ + + @classmethod + def _get_output_config( + cls, name: str, item: str + ) -> Union[UriOutput, DatasetConfig]: + from pai.estimator import FileSystemInputBase + + if not isinstance(item, (str, FileSystemInputBase, DatasetConfig)): + raise ValueError(f"Output data of type {type(item)} is not supported.") + + if isinstance(item, FileSystemInputBase): + output = UriOutput( + name=name, + output_uri=item.to_input_uri(), + ) + elif isinstance(item, DatasetConfig): + output = DatasetConfig(name=name, dataset_id=item.dataset_id) + elif is_oss_uri(item) or is_filesystem_uri(item) or is_odps_table_uri(item): + output = UriOutput( + name=name, + output_uri=as_oss_dir_uri(item), + ) + else: + raise ValueError( + "Invalid output data, supported outputs are OSS, NAS, MaxCompute " + ) + + return output + + @property + def latest_job(self) -> "TrainingJob": + return self._training_jobs[-1] if self._training_jobs else None + + def _build_code_input( + self, job_name: str, source_dir: Optional[str], code_dest: Optional[str] = None + ) -> Optional[CodeDir]: + """Upload source files to OSS and return the code input for training job.""" + if not source_dir: + return + if is_oss_uri(source_dir): + code_uri = source_dir + elif not os.path.exists(source_dir): + raise ValueError(f"Source directory {source_dir} does not exist.") + else: + code_dest = code_dest or self.session.get_storage_path_by_category( + StoragePathCategory.TrainingSrc, to_plain_text(job_name) + ) + code_uri = upload( + source_path=source_dir, + oss_path=code_dest, + bucket=self.session.oss_bucket, + ) + oss_uri_obj = OssUriObj(uri=self.session.patch_oss_endpoint(code_uri)) + code_dir = CodeDir( + location_type="oss", + location_value=OssLocation( + bucket=oss_uri_obj.bucket_name, + key=oss_uri_obj.object_key, + endpoint=oss_uri_obj.endpoint, + ), + ) + + return code_dir diff --git a/pai/libs/alibabacloud_aiworkspace20210204/__init__.py b/pai/libs/alibabacloud_aiworkspace20210204/__init__.py index 
b1b276d..5ec5ff3 100644 --- a/pai/libs/alibabacloud_aiworkspace20210204/__init__.py +++ b/pai/libs/alibabacloud_aiworkspace20210204/__init__.py @@ -1 +1 @@ -__version__ = '1.2.10' \ No newline at end of file +__version__ = '5.0.1' \ No newline at end of file diff --git a/pai/libs/alibabacloud_aiworkspace20210204/client.py b/pai/libs/alibabacloud_aiworkspace20210204/client.py index 436ea45..60bd9d8 100644 --- a/pai/libs/alibabacloud_aiworkspace20210204/client.py +++ b/pai/libs/alibabacloud_aiworkspace20210204/client.py @@ -7,24 +7,25 @@ from alibabacloud_tea_openapi import models as open_api_models from alibabacloud_tea_util.client import Client as UtilClient from alibabacloud_endpoint_util.client import Client as EndpointUtilClient -# from alibabacloud_aiworkspace20210204 import models as aiwork_space_20210204_models +from pai.libs.alibabacloud_aiworkspace20210204 import models as aiwork_space_20210204_models from alibabacloud_tea_util import models as util_models from alibabacloud_openapi_util.client import Client as OpenApiUtilClient -from pai.libs.alibabacloud_aiworkspace20210204 import models as aiwork_space_20210204_models class Client(OpenApiClient): """ *\ """ + def __init__( - self, + self, config: open_api_models.Config, ): super().__init__(config) self._endpoint_rule = '' self.check_config(config) - self._endpoint = self.get_endpoint('aiworkspace', self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint) + self._endpoint = self.get_endpoint('aiworkspace', self._region_id, self._endpoint_rule, self._network, + self._suffix, self._endpoint_map, self._endpoint) def get_endpoint( self, @@ -48,18 +49,30 @@ def add_image_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AddImageResponse: + """ + @summary 增加 Image + + @param request: AddImageRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: 
AddImageResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.accessibility): body['Accessibility'] = request.accessibility if not UtilClient.is_unset(request.description): body['Description'] = request.description + if not UtilClient.is_unset(request.image_id): + body['ImageId'] = request.image_id if not UtilClient.is_unset(request.image_uri): body['ImageUri'] = request.image_uri if not UtilClient.is_unset(request.labels): body['Labels'] = request.labels if not UtilClient.is_unset(request.name): body['Name'] = request.name + if not UtilClient.is_unset(request.size): + body['Size'] = request.size if not UtilClient.is_unset(request.workspace_id): body['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( @@ -88,18 +101,30 @@ async def add_image_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AddImageResponse: + """ + @summary 增加 Image + + @param request: AddImageRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: AddImageResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.accessibility): body['Accessibility'] = request.accessibility if not UtilClient.is_unset(request.description): body['Description'] = request.description + if not UtilClient.is_unset(request.image_id): + body['ImageId'] = request.image_id if not UtilClient.is_unset(request.image_uri): body['ImageUri'] = request.image_uri if not UtilClient.is_unset(request.labels): body['Labels'] = request.labels if not UtilClient.is_unset(request.name): body['Name'] = request.name + if not UtilClient.is_unset(request.size): + body['Size'] = request.size if not UtilClient.is_unset(request.workspace_id): body['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( @@ -126,6 +151,12 @@ def add_image( self, request: aiwork_space_20210204_models.AddImageRequest, ) -> 
aiwork_space_20210204_models.AddImageResponse: + """ + @summary 增加 Image + + @param request: AddImageRequest + @return: AddImageResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.add_image_with_options(request, headers, runtime) @@ -134,6 +165,12 @@ async def add_image_async( self, request: aiwork_space_20210204_models.AddImageRequest, ) -> aiwork_space_20210204_models.AddImageResponse: + """ + @summary 增加 Image + + @param request: AddImageRequest + @return: AddImageResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.add_image_with_options_async(request, headers, runtime) @@ -145,6 +182,14 @@ def add_image_labels_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AddImageLabelsResponse: + """ + @summary 增加 Image 的标签 + + @param request: AddImageLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: AddImageLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -176,6 +221,14 @@ async def add_image_labels_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AddImageLabelsResponse: + """ + @summary 增加 Image 的标签 + + @param request: AddImageLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: AddImageLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -205,6 +258,12 @@ def add_image_labels( image_id: str, request: aiwork_space_20210204_models.AddImageLabelsRequest, ) -> aiwork_space_20210204_models.AddImageLabelsResponse: + """ + @summary 增加 Image 的标签 + + @param request: AddImageLabelsRequest + @return: AddImageLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.add_image_labels_with_options(image_id, request, headers, 
runtime) @@ -214,6 +273,12 @@ async def add_image_labels_async( image_id: str, request: aiwork_space_20210204_models.AddImageLabelsRequest, ) -> aiwork_space_20210204_models.AddImageLabelsResponse: + """ + @summary 增加 Image 的标签 + + @param request: AddImageLabelsRequest + @return: AddImageLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.add_image_labels_with_options_async(image_id, request, headers, runtime) @@ -226,6 +291,13 @@ def add_member_role_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AddMemberRoleResponse: + """ + @summary 增加成员角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: AddMemberRoleResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -253,6 +325,13 @@ async def add_member_role_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AddMemberRoleResponse: + """ + @summary 增加成员角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: AddMemberRoleResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -278,6 +357,11 @@ def add_member_role( member_id: str, role_name: str, ) -> aiwork_space_20210204_models.AddMemberRoleResponse: + """ + @summary 增加成员角色 + + @return: AddMemberRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.add_member_role_with_options(workspace_id, member_id, role_name, headers, runtime) @@ -288,6 +372,11 @@ async def add_member_role_async( member_id: str, role_name: str, ) -> aiwork_space_20210204_models.AddMemberRoleResponse: + """ + @summary 增加成员角色 + + @return: AddMemberRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.add_member_role_with_options_async(workspace_id, member_id, role_name, headers, runtime) @@ -299,6 +388,13 @@ def add_workspace_quota_with_options( 
headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AddWorkspaceQuotaResponse: + """ + @summary 添加资源实例配额 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: AddWorkspaceQuotaResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -325,6 +421,13 @@ async def add_workspace_quota_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AddWorkspaceQuotaResponse: + """ + @summary 添加资源实例配额 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: AddWorkspaceQuotaResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -349,6 +452,11 @@ def add_workspace_quota( workspace_id: str, quota_id: str, ) -> aiwork_space_20210204_models.AddWorkspaceQuotaResponse: + """ + @summary 添加资源实例配额 + + @return: AddWorkspaceQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.add_workspace_quota_with_options(workspace_id, quota_id, headers, runtime) @@ -358,6 +466,11 @@ async def add_workspace_quota_async( workspace_id: str, quota_id: str, ) -> aiwork_space_20210204_models.AddWorkspaceQuotaResponse: + """ + @summary 添加资源实例配额 + + @return: AddWorkspaceQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.add_workspace_quota_with_options_async(workspace_id, quota_id, headers, runtime) @@ -368,6 +481,13 @@ def assume_service_identity_role_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AssumeServiceIdentityRoleResponse: + """ + @summary 用PAI服务账户扮演角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: AssumeServiceIdentityRoleResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -393,6 +513,13 @@ async def assume_service_identity_role_with_options_async( headers: Dict[str, 
str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.AssumeServiceIdentityRoleResponse: + """ + @summary 用PAI服务账户扮演角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: AssumeServiceIdentityRoleResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -416,6 +543,11 @@ def assume_service_identity_role( self, role_name: str, ) -> aiwork_space_20210204_models.AssumeServiceIdentityRoleResponse: + """ + @summary 用PAI服务账户扮演角色 + + @return: AssumeServiceIdentityRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.assume_service_identity_role_with_options(role_name, headers, runtime) @@ -424,16 +556,137 @@ async def assume_service_identity_role_async( self, role_name: str, ) -> aiwork_space_20210204_models.AssumeServiceIdentityRoleResponse: + """ + @summary 用PAI服务账户扮演角色 + + @return: AssumeServiceIdentityRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.assume_service_identity_role_with_options_async(role_name, headers, runtime) + def change_dataset_owner_with_options( + self, + dataset_id: str, + request: aiwork_space_20210204_models.ChangeDatasetOwnerRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ChangeDatasetOwnerResponse: + """ + @summary 修改API的所有者 + + @param request: ChangeDatasetOwnerRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ChangeDatasetOwnerResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.user_id): + body['UserId'] = request.user_id + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='ChangeDatasetOwner', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/datasets/change/{OpenApiUtilClient.get_encode_param(dataset_id)}', 
+ method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ChangeDatasetOwnerResponse(), + self.call_api(params, req, runtime) + ) + + async def change_dataset_owner_with_options_async( + self, + dataset_id: str, + request: aiwork_space_20210204_models.ChangeDatasetOwnerRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ChangeDatasetOwnerResponse: + """ + @summary 修改API的所有者 + + @param request: ChangeDatasetOwnerRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ChangeDatasetOwnerResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.user_id): + body['UserId'] = request.user_id + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='ChangeDatasetOwner', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/datasets/change/{OpenApiUtilClient.get_encode_param(dataset_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ChangeDatasetOwnerResponse(), + await self.call_api_async(params, req, runtime) + ) + + def change_dataset_owner( + self, + dataset_id: str, + request: aiwork_space_20210204_models.ChangeDatasetOwnerRequest, + ) -> aiwork_space_20210204_models.ChangeDatasetOwnerResponse: + """ + @summary 修改API的所有者 + + @param request: ChangeDatasetOwnerRequest + @return: ChangeDatasetOwnerResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.change_dataset_owner_with_options(dataset_id, request, headers, runtime) + + async def change_dataset_owner_async( + self, + dataset_id: str, + request: aiwork_space_20210204_models.ChangeDatasetOwnerRequest, + ) -> 
aiwork_space_20210204_models.ChangeDatasetOwnerResponse: + """ + @summary 修改API的所有者 + + @param request: ChangeDatasetOwnerRequest + @return: ChangeDatasetOwnerResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.change_dataset_owner_with_options_async(dataset_id, request, headers, runtime) + def create_code_source_with_options( self, request: aiwork_space_20210204_models.CreateCodeSourceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateCodeSourceResponse: + """ + @summary 创建一个代码源配置 + + @param request: CreateCodeSourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateCodeSourceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.accessibility): @@ -480,6 +733,14 @@ async def create_code_source_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateCodeSourceResponse: + """ + @summary 创建一个代码源配置 + + @param request: CreateCodeSourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateCodeSourceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.accessibility): @@ -524,6 +785,12 @@ def create_code_source( self, request: aiwork_space_20210204_models.CreateCodeSourceRequest, ) -> aiwork_space_20210204_models.CreateCodeSourceResponse: + """ + @summary 创建一个代码源配置 + + @param request: CreateCodeSourceRequest + @return: CreateCodeSourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_code_source_with_options(request, headers, runtime) @@ -532,20 +799,142 @@ async def create_code_source_async( self, request: aiwork_space_20210204_models.CreateCodeSourceRequest, ) -> aiwork_space_20210204_models.CreateCodeSourceResponse: + """ + @summary 创建一个代码源配置 + + @param request: 
CreateCodeSourceRequest + @return: CreateCodeSourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_code_source_with_options_async(request, headers, runtime) + def create_collection_with_options( + self, + request: aiwork_space_20210204_models.CreateCollectionRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateCollectionResponse: + """ + @summary 创建Collection + + @param request: CreateCollectionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateCollectionResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.collection_name): + body['CollectionName'] = request.collection_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateCollection', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/collections', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateCollectionResponse(), + self.call_api(params, req, runtime) + ) + + async def create_collection_with_options_async( + self, + request: aiwork_space_20210204_models.CreateCollectionRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateCollectionResponse: + """ + @summary 创建Collection + + @param request: CreateCollectionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateCollectionResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.collection_name): + body['CollectionName'] = request.collection_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + 
params = open_api_models.Params( + action='CreateCollection', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/collections', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateCollectionResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_collection( + self, + request: aiwork_space_20210204_models.CreateCollectionRequest, + ) -> aiwork_space_20210204_models.CreateCollectionResponse: + """ + @summary 创建Collection + + @param request: CreateCollectionRequest + @return: CreateCollectionResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_collection_with_options(request, headers, runtime) + + async def create_collection_async( + self, + request: aiwork_space_20210204_models.CreateCollectionRequest, + ) -> aiwork_space_20210204_models.CreateCollectionResponse: + """ + @summary 创建Collection + + @param request: CreateCollectionRequest + @return: CreateCollectionResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_collection_with_options_async(request, headers, runtime) + def create_dataset_with_options( self, request: aiwork_space_20210204_models.CreateDatasetRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateDatasetResponse: + """ + @summary 创建数据集 + + @param request: CreateDatasetRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDatasetResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.accessibility): body['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.data_count): + body['DataCount'] = request.data_count + if not UtilClient.is_unset(request.data_size): + body['DataSize'] = request.data_size if not UtilClient.is_unset(request.data_source_type): 
body['DataSourceType'] = request.data_source_type if not UtilClient.is_unset(request.data_type): @@ -560,14 +949,26 @@ def create_dataset_with_options( body['Options'] = request.options if not UtilClient.is_unset(request.property): body['Property'] = request.property + if not UtilClient.is_unset(request.provider): + body['Provider'] = request.provider if not UtilClient.is_unset(request.provider_type): body['ProviderType'] = request.provider_type + if not UtilClient.is_unset(request.source_dataset_id): + body['SourceDatasetId'] = request.source_dataset_id + if not UtilClient.is_unset(request.source_dataset_version): + body['SourceDatasetVersion'] = request.source_dataset_version if not UtilClient.is_unset(request.source_id): body['SourceId'] = request.source_id if not UtilClient.is_unset(request.source_type): body['SourceType'] = request.source_type if not UtilClient.is_unset(request.uri): body['Uri'] = request.uri + if not UtilClient.is_unset(request.user_id): + body['UserId'] = request.user_id + if not UtilClient.is_unset(request.version_description): + body['VersionDescription'] = request.version_description + if not UtilClient.is_unset(request.version_labels): + body['VersionLabels'] = request.version_labels if not UtilClient.is_unset(request.workspace_id): body['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( @@ -596,10 +997,22 @@ async def create_dataset_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateDatasetResponse: + """ + @summary 创建数据集 + + @param request: CreateDatasetRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDatasetResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.accessibility): body['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.data_count): + body['DataCount'] = request.data_count + if not 
UtilClient.is_unset(request.data_size): + body['DataSize'] = request.data_size if not UtilClient.is_unset(request.data_source_type): body['DataSourceType'] = request.data_source_type if not UtilClient.is_unset(request.data_type): @@ -614,14 +1027,26 @@ async def create_dataset_with_options_async( body['Options'] = request.options if not UtilClient.is_unset(request.property): body['Property'] = request.property + if not UtilClient.is_unset(request.provider): + body['Provider'] = request.provider if not UtilClient.is_unset(request.provider_type): body['ProviderType'] = request.provider_type + if not UtilClient.is_unset(request.source_dataset_id): + body['SourceDatasetId'] = request.source_dataset_id + if not UtilClient.is_unset(request.source_dataset_version): + body['SourceDatasetVersion'] = request.source_dataset_version if not UtilClient.is_unset(request.source_id): body['SourceId'] = request.source_id if not UtilClient.is_unset(request.source_type): body['SourceType'] = request.source_type if not UtilClient.is_unset(request.uri): body['Uri'] = request.uri + if not UtilClient.is_unset(request.user_id): + body['UserId'] = request.user_id + if not UtilClient.is_unset(request.version_description): + body['VersionDescription'] = request.version_description + if not UtilClient.is_unset(request.version_labels): + body['VersionLabels'] = request.version_labels if not UtilClient.is_unset(request.workspace_id): body['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( @@ -648,6 +1073,12 @@ def create_dataset( self, request: aiwork_space_20210204_models.CreateDatasetRequest, ) -> aiwork_space_20210204_models.CreateDatasetResponse: + """ + @summary 创建数据集 + + @param request: CreateDatasetRequest + @return: CreateDatasetResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_dataset_with_options(request, headers, runtime) @@ -656,6 +1087,12 @@ async def create_dataset_async( self, request: 
aiwork_space_20210204_models.CreateDatasetRequest, ) -> aiwork_space_20210204_models.CreateDatasetResponse: + """ + @summary 创建数据集 + + @param request: CreateDatasetRequest + @return: CreateDatasetResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_dataset_with_options_async(request, headers, runtime) @@ -667,6 +1104,14 @@ def create_dataset_labels_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateDatasetLabelsResponse: + """ + @summary 创建或更新 Dataset 的标签 + + @param request: CreateDatasetLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDatasetLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -698,6 +1143,14 @@ async def create_dataset_labels_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateDatasetLabelsResponse: + """ + @summary 创建或更新 Dataset 的标签 + + @param request: CreateDatasetLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDatasetLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -727,6 +1180,12 @@ def create_dataset_labels( dataset_id: str, request: aiwork_space_20210204_models.CreateDatasetLabelsRequest, ) -> aiwork_space_20210204_models.CreateDatasetLabelsResponse: + """ + @summary 创建或更新 Dataset 的标签 + + @param request: CreateDatasetLabelsRequest + @return: CreateDatasetLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_dataset_labels_with_options(dataset_id, request, headers, runtime) @@ -736,35 +1195,62 @@ async def create_dataset_labels_async( dataset_id: str, request: aiwork_space_20210204_models.CreateDatasetLabelsRequest, ) -> 
aiwork_space_20210204_models.CreateDatasetLabelsResponse: + """ + @summary 创建或更新 Dataset 的标签 + + @param request: CreateDatasetLabelsRequest + @return: CreateDatasetLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_dataset_labels_with_options_async(dataset_id, request, headers, runtime) - def create_default_workspace_with_options( + def create_dataset_version_with_options( self, - request: aiwork_space_20210204_models.CreateDefaultWorkspaceRequest, + dataset_id: str, + request: aiwork_space_20210204_models.CreateDatasetVersionRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.CreateDefaultWorkspaceResponse: + ) -> aiwork_space_20210204_models.CreateDatasetVersionResponse: + """ + @summary 创建数据集版本 + + @param request: CreateDatasetVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDatasetVersionResponse + """ UtilClient.validate_model(request) body = {} - if not UtilClient.is_unset(request.add_all_ram_users): - body['AddAllRamUsers'] = request.add_all_ram_users + if not UtilClient.is_unset(request.data_count): + body['DataCount'] = request.data_count + if not UtilClient.is_unset(request.data_size): + body['DataSize'] = request.data_size + if not UtilClient.is_unset(request.data_source_type): + body['DataSourceType'] = request.data_source_type if not UtilClient.is_unset(request.description): body['Description'] = request.description - if not UtilClient.is_unset(request.env_types): - body['EnvTypes'] = request.env_types - if not UtilClient.is_unset(request.resources): - body['Resources'] = request.resources + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.options): + body['Options'] = request.options + if not UtilClient.is_unset(request.property): + body['Property'] = request.property + if not UtilClient.is_unset(request.source_id): 
+ body['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_type): + body['SourceType'] = request.source_type + if not UtilClient.is_unset(request.uri): + body['Uri'] = request.uri req = open_api_models.OpenApiRequest( headers=headers, body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='CreateDefaultWorkspace', + action='CreateDatasetVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/defaultWorkspaces', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions', method='POST', auth_type='AK', style='ROA', @@ -772,35 +1258,56 @@ def create_default_workspace_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.CreateDefaultWorkspaceResponse(), + aiwork_space_20210204_models.CreateDatasetVersionResponse(), self.call_api(params, req, runtime) ) - async def create_default_workspace_with_options_async( + async def create_dataset_version_with_options_async( self, - request: aiwork_space_20210204_models.CreateDefaultWorkspaceRequest, + dataset_id: str, + request: aiwork_space_20210204_models.CreateDatasetVersionRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.CreateDefaultWorkspaceResponse: + ) -> aiwork_space_20210204_models.CreateDatasetVersionResponse: + """ + @summary 创建数据集版本 + + @param request: CreateDatasetVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDatasetVersionResponse + """ UtilClient.validate_model(request) body = {} - if not UtilClient.is_unset(request.add_all_ram_users): - body['AddAllRamUsers'] = request.add_all_ram_users + if not UtilClient.is_unset(request.data_count): + body['DataCount'] = request.data_count + if not UtilClient.is_unset(request.data_size): + body['DataSize'] = request.data_size + if not UtilClient.is_unset(request.data_source_type): + body['DataSourceType'] = 
request.data_source_type if not UtilClient.is_unset(request.description): body['Description'] = request.description - if not UtilClient.is_unset(request.env_types): - body['EnvTypes'] = request.env_types - if not UtilClient.is_unset(request.resources): - body['Resources'] = request.resources + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.options): + body['Options'] = request.options + if not UtilClient.is_unset(request.property): + body['Property'] = request.property + if not UtilClient.is_unset(request.source_id): + body['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_type): + body['SourceType'] = request.source_type + if not UtilClient.is_unset(request.uri): + body['Uri'] = request.uri req = open_api_models.OpenApiRequest( headers=headers, body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='CreateDefaultWorkspace', + action='CreateDatasetVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/defaultWorkspaces', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions', method='POST', auth_type='AK', style='ROA', @@ -808,80 +1315,339 @@ async def create_default_workspace_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.CreateDefaultWorkspaceResponse(), + aiwork_space_20210204_models.CreateDatasetVersionResponse(), await self.call_api_async(params, req, runtime) ) - def create_default_workspace( + def create_dataset_version( self, - request: aiwork_space_20210204_models.CreateDefaultWorkspaceRequest, - ) -> aiwork_space_20210204_models.CreateDefaultWorkspaceResponse: + dataset_id: str, + request: aiwork_space_20210204_models.CreateDatasetVersionRequest, + ) -> aiwork_space_20210204_models.CreateDatasetVersionResponse: + """ + @summary 创建数据集版本 + + @param request: CreateDatasetVersionRequest + @return: CreateDatasetVersionResponse + """ runtime 
= util_models.RuntimeOptions() headers = {} - return self.create_default_workspace_with_options(request, headers, runtime) + return self.create_dataset_version_with_options(dataset_id, request, headers, runtime) - async def create_default_workspace_async( + async def create_dataset_version_async( self, - request: aiwork_space_20210204_models.CreateDefaultWorkspaceRequest, - ) -> aiwork_space_20210204_models.CreateDefaultWorkspaceResponse: + dataset_id: str, + request: aiwork_space_20210204_models.CreateDatasetVersionRequest, + ) -> aiwork_space_20210204_models.CreateDatasetVersionResponse: + """ + @summary 创建数据集版本 + + @param request: CreateDatasetVersionRequest + @return: CreateDatasetVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.create_default_workspace_with_options_async(request, headers, runtime) + return await self.create_dataset_version_with_options_async(dataset_id, request, headers, runtime) - def create_ding_talk_robot_message_with_options( + def create_dataset_version_labels_with_options( self, - request: aiwork_space_20210204_models.CreateDingTalkRobotMessageRequest, + dataset_id: str, + version_name: str, + request: aiwork_space_20210204_models.CreateDatasetVersionLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.CreateDingTalkRobotMessageResponse: + ) -> aiwork_space_20210204_models.CreateDatasetVersionLabelsResponse: + """ + @summary 创建数据集版本的标签 + + @param request: CreateDatasetVersionLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDatasetVersionLabelsResponse + """ UtilClient.validate_model(request) body = {} - if not UtilClient.is_unset(request.access_token): - body['AccessToken'] = request.access_token - if not UtilClient.is_unset(request.message): - body['Message'] = request.message - if not UtilClient.is_unset(request.secret): - body['Secret'] = request.secret + if not 
UtilClient.is_unset(request.labels): + body['Labels'] = request.labels req = open_api_models.OpenApiRequest( headers=headers, body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='CreateDingTalkRobotMessage', + action='CreateDatasetVersionLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/dingtalkrobotmessages', - method='POST', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}/labels', + method='PUT', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.CreateDingTalkRobotMessageResponse(), + aiwork_space_20210204_models.CreateDatasetVersionLabelsResponse(), self.call_api(params, req, runtime) ) - async def create_ding_talk_robot_message_with_options_async( + async def create_dataset_version_labels_with_options_async( self, - request: aiwork_space_20210204_models.CreateDingTalkRobotMessageRequest, + dataset_id: str, + version_name: str, + request: aiwork_space_20210204_models.CreateDatasetVersionLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.CreateDingTalkRobotMessageResponse: + ) -> aiwork_space_20210204_models.CreateDatasetVersionLabelsResponse: + """ + @summary 创建数据集版本的标签 + + @param request: CreateDatasetVersionLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDatasetVersionLabelsResponse + """ UtilClient.validate_model(request) body = {} - if not UtilClient.is_unset(request.access_token): - body['AccessToken'] = request.access_token - if not UtilClient.is_unset(request.message): - body['Message'] = request.message - if not UtilClient.is_unset(request.secret): - body['Secret'] = request.secret + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels req = open_api_models.OpenApiRequest( 
headers=headers, body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='CreateDingTalkRobotMessage', + action='CreateDatasetVersionLabels', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}/labels', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateDatasetVersionLabelsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_dataset_version_labels( + self, + dataset_id: str, + version_name: str, + request: aiwork_space_20210204_models.CreateDatasetVersionLabelsRequest, + ) -> aiwork_space_20210204_models.CreateDatasetVersionLabelsResponse: + """ + @summary 创建数据集版本的标签 + + @param request: CreateDatasetVersionLabelsRequest + @return: CreateDatasetVersionLabelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_dataset_version_labels_with_options(dataset_id, version_name, request, headers, runtime) + + async def create_dataset_version_labels_async( + self, + dataset_id: str, + version_name: str, + request: aiwork_space_20210204_models.CreateDatasetVersionLabelsRequest, + ) -> aiwork_space_20210204_models.CreateDatasetVersionLabelsResponse: + """ + @summary 创建数据集版本的标签 + + @param request: CreateDatasetVersionLabelsRequest + @return: CreateDatasetVersionLabelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_dataset_version_labels_with_options_async(dataset_id, version_name, request, headers, + runtime) + + def create_default_workspace_with_options( + self, + request: aiwork_space_20210204_models.CreateDefaultWorkspaceRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateDefaultWorkspaceResponse: + """ + @summary 创建默认工作空间 + + 
@param request: CreateDefaultWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDefaultWorkspaceResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.add_all_ram_users): + body['AddAllRamUsers'] = request.add_all_ram_users + if not UtilClient.is_unset(request.description): + body['Description'] = request.description + if not UtilClient.is_unset(request.env_types): + body['EnvTypes'] = request.env_types + if not UtilClient.is_unset(request.resources): + body['Resources'] = request.resources + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateDefaultWorkspace', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/defaultWorkspaces', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateDefaultWorkspaceResponse(), + self.call_api(params, req, runtime) + ) + + async def create_default_workspace_with_options_async( + self, + request: aiwork_space_20210204_models.CreateDefaultWorkspaceRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateDefaultWorkspaceResponse: + """ + @summary 创建默认工作空间 + + @param request: CreateDefaultWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDefaultWorkspaceResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.add_all_ram_users): + body['AddAllRamUsers'] = request.add_all_ram_users + if not UtilClient.is_unset(request.description): + body['Description'] = request.description + if not UtilClient.is_unset(request.env_types): + body['EnvTypes'] = request.env_types + if not UtilClient.is_unset(request.resources): + 
body['Resources'] = request.resources + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateDefaultWorkspace', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/defaultWorkspaces', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateDefaultWorkspaceResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_default_workspace( + self, + request: aiwork_space_20210204_models.CreateDefaultWorkspaceRequest, + ) -> aiwork_space_20210204_models.CreateDefaultWorkspaceResponse: + """ + @summary 创建默认工作空间 + + @param request: CreateDefaultWorkspaceRequest + @return: CreateDefaultWorkspaceResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_default_workspace_with_options(request, headers, runtime) + + async def create_default_workspace_async( + self, + request: aiwork_space_20210204_models.CreateDefaultWorkspaceRequest, + ) -> aiwork_space_20210204_models.CreateDefaultWorkspaceResponse: + """ + @summary 创建默认工作空间 + + @param request: CreateDefaultWorkspaceRequest + @return: CreateDefaultWorkspaceResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_default_workspace_with_options_async(request, headers, runtime) + + def create_ding_talk_robot_message_with_options( + self, + request: aiwork_space_20210204_models.CreateDingTalkRobotMessageRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateDingTalkRobotMessageResponse: + """ + @summary 发送特定格式的消息给钉钉机器人 + + @param request: CreateDingTalkRobotMessageRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDingTalkRobotMessageResponse + """ + UtilClient.validate_model(request) + body = {} + 
if not UtilClient.is_unset(request.access_token): + body['AccessToken'] = request.access_token + if not UtilClient.is_unset(request.message): + body['Message'] = request.message + if not UtilClient.is_unset(request.secret): + body['Secret'] = request.secret + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateDingTalkRobotMessage', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/dingtalkrobotmessages', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateDingTalkRobotMessageResponse(), + self.call_api(params, req, runtime) + ) + + async def create_ding_talk_robot_message_with_options_async( + self, + request: aiwork_space_20210204_models.CreateDingTalkRobotMessageRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateDingTalkRobotMessageResponse: + """ + @summary 发送特定格式的消息给钉钉机器人 + + @param request: CreateDingTalkRobotMessageRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateDingTalkRobotMessageResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.access_token): + body['AccessToken'] = request.access_token + if not UtilClient.is_unset(request.message): + body['Message'] = request.message + if not UtilClient.is_unset(request.secret): + body['Secret'] = request.secret + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateDingTalkRobotMessage', version='2021-02-04', protocol='HTTPS', pathname=f'/api/v1/dingtalkrobotmessages', @@ -900,6 +1666,12 @@ def create_ding_talk_robot_message( self, request: aiwork_space_20210204_models.CreateDingTalkRobotMessageRequest, ) -> 
aiwork_space_20210204_models.CreateDingTalkRobotMessageResponse: + """ + @summary 发送特定格式的消息给钉钉机器人 + + @param request: CreateDingTalkRobotMessageRequest + @return: CreateDingTalkRobotMessageResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_ding_talk_robot_message_with_options(request, headers, runtime) @@ -908,10 +1680,136 @@ async def create_ding_talk_robot_message_async( self, request: aiwork_space_20210204_models.CreateDingTalkRobotMessageRequest, ) -> aiwork_space_20210204_models.CreateDingTalkRobotMessageResponse: + """ + @summary 发送特定格式的消息给钉钉机器人 + + @param request: CreateDingTalkRobotMessageRequest + @return: CreateDingTalkRobotMessageResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_ding_talk_robot_message_with_options_async(request, headers, runtime) + def create_experiment_with_options( + self, + request: aiwork_space_20210204_models.CreateExperimentRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateExperimentResponse: + """ + @summary 创建实验 + + @param request: CreateExperimentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateExperimentResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.accessibility): + body['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.artifact_uri): + body['ArtifactUri'] = request.artifact_uri + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + if not UtilClient.is_unset(request.workspace_id): + body['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateExperiment', + version='2021-02-04', + protocol='HTTPS', + 
pathname=f'/api/v1/experiments', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateExperimentResponse(), + self.call_api(params, req, runtime) + ) + + async def create_experiment_with_options_async( + self, + request: aiwork_space_20210204_models.CreateExperimentRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateExperimentResponse: + """ + @summary 创建实验 + + @param request: CreateExperimentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateExperimentResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.accessibility): + body['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.artifact_uri): + body['ArtifactUri'] = request.artifact_uri + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + if not UtilClient.is_unset(request.workspace_id): + body['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateExperiment', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/experiments', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateExperimentResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_experiment( + self, + request: aiwork_space_20210204_models.CreateExperimentRequest, + ) -> aiwork_space_20210204_models.CreateExperimentResponse: + """ + @summary 创建实验 + + @param request: CreateExperimentRequest + @return: CreateExperimentResponse + """ + runtime = 
util_models.RuntimeOptions() + headers = {} + return self.create_experiment_with_options(request, headers, runtime) + + async def create_experiment_async( + self, + request: aiwork_space_20210204_models.CreateExperimentRequest, + ) -> aiwork_space_20210204_models.CreateExperimentResponse: + """ + @summary 创建实验 + + @param request: CreateExperimentRequest + @return: CreateExperimentResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_experiment_with_options_async(request, headers, runtime) + def create_member_with_options( self, workspace_id: str, @@ -919,6 +1817,14 @@ def create_member_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateMemberResponse: + """ + @summary 创建成员 + + @param request: CreateMemberRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateMemberResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.members): @@ -950,6 +1856,14 @@ async def create_member_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateMemberResponse: + """ + @summary 创建成员 + + @param request: CreateMemberRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateMemberResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.members): @@ -979,6 +1893,12 @@ def create_member( workspace_id: str, request: aiwork_space_20210204_models.CreateMemberRequest, ) -> aiwork_space_20210204_models.CreateMemberResponse: + """ + @summary 创建成员 + + @param request: CreateMemberRequest + @return: CreateMemberResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_member_with_options(workspace_id, request, headers, runtime) @@ -988,6 +1908,12 @@ async def create_member_async( workspace_id: str, request: 
aiwork_space_20210204_models.CreateMemberRequest, ) -> aiwork_space_20210204_models.CreateMemberResponse: + """ + @summary 创建成员 + + @param request: CreateMemberRequest + @return: CreateMemberResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_member_with_options_async(workspace_id, request, headers, runtime) @@ -998,12 +1924,22 @@ def create_model_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelResponse: + """ + @summary 创建模型 + + @param request: CreateModelRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.accessibility): body['Accessibility'] = request.accessibility if not UtilClient.is_unset(request.domain): body['Domain'] = request.domain + if not UtilClient.is_unset(request.extra_info): + body['ExtraInfo'] = request.extra_info if not UtilClient.is_unset(request.labels): body['Labels'] = request.labels if not UtilClient.is_unset(request.model_description): @@ -1012,6 +1948,10 @@ def create_model_with_options( body['ModelDoc'] = request.model_doc if not UtilClient.is_unset(request.model_name): body['ModelName'] = request.model_name + if not UtilClient.is_unset(request.model_type): + body['ModelType'] = request.model_type + if not UtilClient.is_unset(request.order_number): + body['OrderNumber'] = request.order_number if not UtilClient.is_unset(request.origin): body['Origin'] = request.origin if not UtilClient.is_unset(request.task): @@ -1044,12 +1984,22 @@ async def create_model_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelResponse: + """ + @summary 创建模型 + + @param request: CreateModelRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelResponse + 
""" UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.accessibility): body['Accessibility'] = request.accessibility if not UtilClient.is_unset(request.domain): body['Domain'] = request.domain + if not UtilClient.is_unset(request.extra_info): + body['ExtraInfo'] = request.extra_info if not UtilClient.is_unset(request.labels): body['Labels'] = request.labels if not UtilClient.is_unset(request.model_description): @@ -1058,6 +2008,10 @@ async def create_model_with_options_async( body['ModelDoc'] = request.model_doc if not UtilClient.is_unset(request.model_name): body['ModelName'] = request.model_name + if not UtilClient.is_unset(request.model_type): + body['ModelType'] = request.model_type + if not UtilClient.is_unset(request.order_number): + body['OrderNumber'] = request.order_number if not UtilClient.is_unset(request.origin): body['Origin'] = request.origin if not UtilClient.is_unset(request.task): @@ -1088,6 +2042,12 @@ def create_model( self, request: aiwork_space_20210204_models.CreateModelRequest, ) -> aiwork_space_20210204_models.CreateModelResponse: + """ + @summary 创建模型 + + @param request: CreateModelRequest + @return: CreateModelResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_model_with_options(request, headers, runtime) @@ -1096,6 +2056,12 @@ async def create_model_async( self, request: aiwork_space_20210204_models.CreateModelRequest, ) -> aiwork_space_20210204_models.CreateModelResponse: + """ + @summary 创建模型 + + @param request: CreateModelRequest + @return: CreateModelResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_model_with_options_async(request, headers, runtime) @@ -1107,6 +2073,14 @@ def create_model_labels_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelLabelsResponse: + """ + @summary 创建或更新模型的标签 + + @param request: CreateModelLabelsRequest + @param headers: map + 
@param runtime: runtime options for this request RuntimeOptions + @return: CreateModelLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -1138,6 +2112,14 @@ async def create_model_labels_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelLabelsResponse: + """ + @summary 创建或更新模型的标签 + + @param request: CreateModelLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -1167,6 +2149,12 @@ def create_model_labels( model_id: str, request: aiwork_space_20210204_models.CreateModelLabelsRequest, ) -> aiwork_space_20210204_models.CreateModelLabelsResponse: + """ + @summary 创建或更新模型的标签 + + @param request: CreateModelLabelsRequest + @return: CreateModelLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_model_labels_with_options(model_id, request, headers, runtime) @@ -1176,6 +2164,12 @@ async def create_model_labels_async( model_id: str, request: aiwork_space_20210204_models.CreateModelLabelsRequest, ) -> aiwork_space_20210204_models.CreateModelLabelsResponse: + """ + @summary 创建或更新模型的标签 + + @param request: CreateModelLabelsRequest + @return: CreateModelLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_model_labels_with_options_async(model_id, request, headers, runtime) @@ -1187,8 +2181,18 @@ def create_model_release_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelReleaseResponse: + """ + @summary 发布模型 + + @param request: CreateModelReleaseRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelReleaseResponse + """ 
UtilClient.validate_model(request) body = {} + if not UtilClient.is_unset(request.collections): + body['Collections'] = request.collections if not UtilClient.is_unset(request.target_model_origin): body['TargetModelOrigin'] = request.target_model_origin if not UtilClient.is_unset(request.target_model_provider): @@ -1220,8 +2224,18 @@ async def create_model_release_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelReleaseResponse: + """ + @summary 发布模型 + + @param request: CreateModelReleaseRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelReleaseResponse + """ UtilClient.validate_model(request) body = {} + if not UtilClient.is_unset(request.collections): + body['Collections'] = request.collections if not UtilClient.is_unset(request.target_model_origin): body['TargetModelOrigin'] = request.target_model_origin if not UtilClient.is_unset(request.target_model_provider): @@ -1251,6 +2265,12 @@ def create_model_release( model_id: str, request: aiwork_space_20210204_models.CreateModelReleaseRequest, ) -> aiwork_space_20210204_models.CreateModelReleaseResponse: + """ + @summary 发布模型 + + @param request: CreateModelReleaseRequest + @return: CreateModelReleaseResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_model_release_with_options(model_id, request, headers, runtime) @@ -1260,6 +2280,12 @@ async def create_model_release_async( model_id: str, request: aiwork_space_20210204_models.CreateModelReleaseRequest, ) -> aiwork_space_20210204_models.CreateModelReleaseResponse: + """ + @summary 发布模型 + + @param request: CreateModelReleaseRequest + @return: CreateModelReleaseResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_model_release_with_options_async(model_id, request, headers, runtime) @@ -1271,10 +2297,24 @@ def create_model_version_with_options( headers: 
Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelVersionResponse: + """ + @summary 创建模型版本 + + @param request: CreateModelVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelVersionResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.approval_status): body['ApprovalStatus'] = request.approval_status + if not UtilClient.is_unset(request.compression_spec): + body['CompressionSpec'] = request.compression_spec + if not UtilClient.is_unset(request.evaluation_spec): + body['EvaluationSpec'] = request.evaluation_spec + if not UtilClient.is_unset(request.extra_info): + body['ExtraInfo'] = request.extra_info if not UtilClient.is_unset(request.format_type): body['FormatType'] = request.format_type if not UtilClient.is_unset(request.framework_type): @@ -1326,10 +2366,24 @@ async def create_model_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelVersionResponse: + """ + @summary 创建模型版本 + + @param request: CreateModelVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelVersionResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.approval_status): body['ApprovalStatus'] = request.approval_status + if not UtilClient.is_unset(request.compression_spec): + body['CompressionSpec'] = request.compression_spec + if not UtilClient.is_unset(request.evaluation_spec): + body['EvaluationSpec'] = request.evaluation_spec + if not UtilClient.is_unset(request.extra_info): + body['ExtraInfo'] = request.extra_info if not UtilClient.is_unset(request.format_type): body['FormatType'] = request.format_type if not UtilClient.is_unset(request.framework_type): @@ -1379,6 +2433,12 @@ def create_model_version( model_id: str, request: 
aiwork_space_20210204_models.CreateModelVersionRequest, ) -> aiwork_space_20210204_models.CreateModelVersionResponse: + """ + @summary 创建模型版本 + + @param request: CreateModelVersionRequest + @return: CreateModelVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_model_version_with_options(model_id, request, headers, runtime) @@ -1388,6 +2448,12 @@ async def create_model_version_async( model_id: str, request: aiwork_space_20210204_models.CreateModelVersionRequest, ) -> aiwork_space_20210204_models.CreateModelVersionResponse: + """ + @summary 创建模型版本 + + @param request: CreateModelVersionRequest + @return: CreateModelVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_model_version_with_options_async(model_id, request, headers, runtime) @@ -1400,6 +2466,14 @@ def create_model_version_labels_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelVersionLabelsResponse: + """ + @summary 创建或更新模型版本的标签 + + @param request: CreateModelVersionLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelVersionLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -1432,6 +2506,14 @@ async def create_model_version_labels_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelVersionLabelsResponse: + """ + @summary 创建或更新模型版本的标签 + + @param request: CreateModelVersionLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelVersionLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -1462,6 +2544,12 @@ def create_model_version_labels( version_name: str, request: 
aiwork_space_20210204_models.CreateModelVersionLabelsRequest, ) -> aiwork_space_20210204_models.CreateModelVersionLabelsResponse: + """ + @summary 创建或更新模型版本的标签 + + @param request: CreateModelVersionLabelsRequest + @return: CreateModelVersionLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_model_version_labels_with_options(model_id, version_name, request, headers, runtime) @@ -1472,9 +2560,16 @@ async def create_model_version_labels_async( version_name: str, request: aiwork_space_20210204_models.CreateModelVersionLabelsRequest, ) -> aiwork_space_20210204_models.CreateModelVersionLabelsResponse: + """ + @summary 创建或更新模型版本的标签 + + @param request: CreateModelVersionLabelsRequest + @return: CreateModelVersionLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.create_model_version_labels_with_options_async(model_id, version_name, request, headers, runtime) + return await self.create_model_version_labels_with_options_async(model_id, version_name, request, headers, + runtime) def create_model_version_release_with_options( self, @@ -1484,6 +2579,14 @@ def create_model_version_release_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelVersionReleaseResponse: + """ + @summary 发布模型版本 + + @param request: CreateModelVersionReleaseRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateModelVersionReleaseResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.target_model_origin): @@ -1518,6 +2621,14 @@ async def create_model_version_release_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateModelVersionReleaseResponse: + """ + @summary 发布模型版本 + + @param request: CreateModelVersionReleaseRequest + @param headers: map + @param runtime: runtime options for this 
request RuntimeOptions + @return: CreateModelVersionReleaseResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.target_model_origin): @@ -1550,6 +2661,12 @@ def create_model_version_release( version_name: str, request: aiwork_space_20210204_models.CreateModelVersionReleaseRequest, ) -> aiwork_space_20210204_models.CreateModelVersionReleaseResponse: + """ + @summary 发布模型版本 + + @param request: CreateModelVersionReleaseRequest + @return: CreateModelVersionReleaseResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_model_version_release_with_options(model_id, version_name, request, headers, runtime) @@ -1560,9 +2677,16 @@ async def create_model_version_release_async( version_name: str, request: aiwork_space_20210204_models.CreateModelVersionReleaseRequest, ) -> aiwork_space_20210204_models.CreateModelVersionReleaseResponse: + """ + @summary 发布模型版本 + + @param request: CreateModelVersionReleaseRequest + @return: CreateModelVersionReleaseResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.create_model_version_release_with_options_async(model_id, version_name, request, headers, runtime) + return await self.create_model_version_release_with_options_async(model_id, version_name, request, headers, + runtime) def create_product_orders_with_options( self, @@ -1570,6 +2694,14 @@ def create_product_orders_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateProductOrdersResponse: + """ + @summary 创建产品订单 + + @param request: CreateProductOrdersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateProductOrdersResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.auto_pay): @@ -1602,6 +2734,14 @@ async def create_product_orders_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> 
aiwork_space_20210204_models.CreateProductOrdersResponse: + """ + @summary 创建产品订单 + + @param request: CreateProductOrdersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateProductOrdersResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.auto_pay): @@ -1632,6 +2772,12 @@ def create_product_orders( self, request: aiwork_space_20210204_models.CreateProductOrdersRequest, ) -> aiwork_space_20210204_models.CreateProductOrdersResponse: + """ + @summary 创建产品订单 + + @param request: CreateProductOrdersRequest + @return: CreateProductOrdersResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_product_orders_with_options(request, headers, runtime) @@ -1640,6 +2786,12 @@ async def create_product_orders_async( self, request: aiwork_space_20210204_models.CreateProductOrdersRequest, ) -> aiwork_space_20210204_models.CreateProductOrdersResponse: + """ + @summary 创建产品订单 + + @param request: CreateProductOrdersRequest + @return: CreateProductOrdersResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_product_orders_with_options_async(request, headers, runtime) @@ -1650,6 +2802,14 @@ def create_service_identity_role_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateServiceIdentityRoleResponse: + """ + @summary 创建被PAI服务账户扮演的角色 + + @param request: CreateServiceIdentityRoleRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateServiceIdentityRoleResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.role_name): @@ -1680,6 +2840,14 @@ async def create_service_identity_role_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateServiceIdentityRoleResponse: + """ + @summary 创建被PAI服务账户扮演的角色 + + 
@param request: CreateServiceIdentityRoleRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateServiceIdentityRoleResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.role_name): @@ -1708,6 +2876,12 @@ def create_service_identity_role( self, request: aiwork_space_20210204_models.CreateServiceIdentityRoleRequest, ) -> aiwork_space_20210204_models.CreateServiceIdentityRoleResponse: + """ + @summary 创建被PAI服务账户扮演的角色 + + @param request: CreateServiceIdentityRoleRequest + @return: CreateServiceIdentityRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_service_identity_role_with_options(request, headers, runtime) @@ -1716,15 +2890,385 @@ async def create_service_identity_role_async( self, request: aiwork_space_20210204_models.CreateServiceIdentityRoleRequest, ) -> aiwork_space_20210204_models.CreateServiceIdentityRoleResponse: + """ + @summary 创建被PAI服务账户扮演的角色 + + @param request: CreateServiceIdentityRoleRequest + @return: CreateServiceIdentityRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_service_identity_role_with_options_async(request, headers, runtime) + def create_service_template_with_options( + self, + request: aiwork_space_20210204_models.CreateServiceTemplateRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateServiceTemplateResponse: + """ + @summary 创建服务模版 + + @param request: CreateServiceTemplateRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateServiceTemplateResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.inference_spec): + body['InferenceSpec'] = request.inference_spec + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not 
UtilClient.is_unset(request.order_number): + body['OrderNumber'] = request.order_number + if not UtilClient.is_unset(request.provider): + body['Provider'] = request.provider + if not UtilClient.is_unset(request.service_template_description): + body['ServiceTemplateDescription'] = request.service_template_description + if not UtilClient.is_unset(request.service_template_doc): + body['ServiceTemplateDoc'] = request.service_template_doc + if not UtilClient.is_unset(request.service_template_name): + body['ServiceTemplateName'] = request.service_template_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateServiceTemplate', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/servicetemplates', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateServiceTemplateResponse(), + self.call_api(params, req, runtime) + ) + + async def create_service_template_with_options_async( + self, + request: aiwork_space_20210204_models.CreateServiceTemplateRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateServiceTemplateResponse: + """ + @summary 创建服务模版 + + @param request: CreateServiceTemplateRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateServiceTemplateResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.inference_spec): + body['InferenceSpec'] = request.inference_spec + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.order_number): + body['OrderNumber'] = request.order_number + if not UtilClient.is_unset(request.provider): + body['Provider'] = request.provider + if not 
UtilClient.is_unset(request.service_template_description): + body['ServiceTemplateDescription'] = request.service_template_description + if not UtilClient.is_unset(request.service_template_doc): + body['ServiceTemplateDoc'] = request.service_template_doc + if not UtilClient.is_unset(request.service_template_name): + body['ServiceTemplateName'] = request.service_template_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateServiceTemplate', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/servicetemplates', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateServiceTemplateResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_service_template( + self, + request: aiwork_space_20210204_models.CreateServiceTemplateRequest, + ) -> aiwork_space_20210204_models.CreateServiceTemplateResponse: + """ + @summary 创建服务模版 + + @param request: CreateServiceTemplateRequest + @return: CreateServiceTemplateResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_service_template_with_options(request, headers, runtime) + + async def create_service_template_async( + self, + request: aiwork_space_20210204_models.CreateServiceTemplateRequest, + ) -> aiwork_space_20210204_models.CreateServiceTemplateResponse: + """ + @summary 创建服务模版 + + @param request: CreateServiceTemplateRequest + @return: CreateServiceTemplateResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_service_template_with_options_async(request, headers, runtime) + + def create_service_template_labels_with_options( + self, + service_template_id: str, + request: aiwork_space_20210204_models.CreateServiceTemplateLabelsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, 
+ ) -> aiwork_space_20210204_models.CreateServiceTemplateLabelsResponse: + """ + @summary 创建或更新服务模版的标签 + + @param request: CreateServiceTemplateLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateServiceTemplateLabelsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateServiceTemplateLabels', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}/labels', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateServiceTemplateLabelsResponse(), + self.call_api(params, req, runtime) + ) + + async def create_service_template_labels_with_options_async( + self, + service_template_id: str, + request: aiwork_space_20210204_models.CreateServiceTemplateLabelsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateServiceTemplateLabelsResponse: + """ + @summary 创建或更新服务模版的标签 + + @param request: CreateServiceTemplateLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateServiceTemplateLabelsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateServiceTemplateLabels', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}/labels', + 
method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateServiceTemplateLabelsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_service_template_labels( + self, + service_template_id: str, + request: aiwork_space_20210204_models.CreateServiceTemplateLabelsRequest, + ) -> aiwork_space_20210204_models.CreateServiceTemplateLabelsResponse: + """ + @summary 创建或更新服务模版的标签 + + @param request: CreateServiceTemplateLabelsRequest + @return: CreateServiceTemplateLabelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_service_template_labels_with_options(service_template_id, request, headers, runtime) + + async def create_service_template_labels_async( + self, + service_template_id: str, + request: aiwork_space_20210204_models.CreateServiceTemplateLabelsRequest, + ) -> aiwork_space_20210204_models.CreateServiceTemplateLabelsResponse: + """ + @summary 创建或更新服务模版的标签 + + @param request: CreateServiceTemplateLabelsRequest + @return: CreateServiceTemplateLabelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_service_template_labels_with_options_async(service_template_id, request, headers, + runtime) + + def create_trial_with_options( + self, + request: aiwork_space_20210204_models.CreateTrialRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateTrialResponse: + """ + @summary 创建Trial + + @param request: CreateTrialRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateTrialResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.experiment_id): + body['ExperimentId'] = request.experiment_id + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not 
UtilClient.is_unset(request.name): + body['Name'] = request.name + if not UtilClient.is_unset(request.source_id): + body['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_type): + body['SourceType'] = request.source_type + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateTrial', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/trials', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.CreateTrialResponse(), + self.call_api(params, req, runtime) + ) + + async def create_trial_with_options_async( + self, + request: aiwork_space_20210204_models.CreateTrialRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.CreateTrialResponse: + """ + @summary 创建Trial + + @param request: CreateTrialRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateTrialResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.experiment_id): + body['ExperimentId'] = request.experiment_id + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + if not UtilClient.is_unset(request.source_id): + body['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_type): + body['SourceType'] = request.source_type + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateTrial', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/trials', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + 
aiwork_space_20210204_models.CreateTrialResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_trial( + self, + request: aiwork_space_20210204_models.CreateTrialRequest, + ) -> aiwork_space_20210204_models.CreateTrialResponse: + """ + @summary 创建Trial + + @param request: CreateTrialRequest + @return: CreateTrialResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_trial_with_options(request, headers, runtime) + + async def create_trial_async( + self, + request: aiwork_space_20210204_models.CreateTrialRequest, + ) -> aiwork_space_20210204_models.CreateTrialResponse: + """ + @summary 创建Trial + + @param request: CreateTrialRequest + @return: CreateTrialResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_trial_with_options_async(request, headers, runtime) + def create_user_with_options( self, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateUserResponse: + """ + @summary 创建用户 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateUserResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1749,6 +3293,13 @@ async def create_user_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateUserResponse: + """ + @summary 创建用户 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateUserResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1769,11 +3320,21 @@ async def create_user_with_options_async( ) def create_user(self) -> aiwork_space_20210204_models.CreateUserResponse: + """ + @summary 创建用户 + + @return: CreateUserResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_user_with_options(headers, runtime) async def create_user_async(self) -> 
aiwork_space_20210204_models.CreateUserResponse: + """ + @summary 创建用户 + + @return: CreateUserResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_user_with_options_async(headers, runtime) @@ -1784,6 +3345,14 @@ def create_workspace_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateWorkspaceResponse: + """ + @summary 创建工作空间 + + @param request: CreateWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateWorkspaceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): @@ -1820,6 +3389,14 @@ async def create_workspace_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateWorkspaceResponse: + """ + @summary 创建工作空间 + + @param request: CreateWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateWorkspaceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): @@ -1854,6 +3431,12 @@ def create_workspace( self, request: aiwork_space_20210204_models.CreateWorkspaceRequest, ) -> aiwork_space_20210204_models.CreateWorkspaceResponse: + """ + @summary 创建工作空间 + + @param request: CreateWorkspaceRequest + @return: CreateWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_workspace_with_options(request, headers, runtime) @@ -1862,6 +3445,12 @@ async def create_workspace_async( self, request: aiwork_space_20210204_models.CreateWorkspaceRequest, ) -> aiwork_space_20210204_models.CreateWorkspaceResponse: + """ + @summary 创建工作空间 + + @param request: CreateWorkspaceRequest + @return: CreateWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_workspace_with_options_async(request, headers, 
runtime) @@ -1873,6 +3462,14 @@ def create_workspace_resource_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateWorkspaceResourceResponse: + """ + @summary 创建资源 + + @param request: CreateWorkspaceResourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateWorkspaceResourceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.option): @@ -1906,6 +3503,14 @@ async def create_workspace_resource_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.CreateWorkspaceResourceResponse: + """ + @summary 创建资源 + + @param request: CreateWorkspaceResourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateWorkspaceResourceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.option): @@ -1937,6 +3542,12 @@ def create_workspace_resource( workspace_id: str, request: aiwork_space_20210204_models.CreateWorkspaceResourceRequest, ) -> aiwork_space_20210204_models.CreateWorkspaceResourceResponse: + """ + @summary 创建资源 + + @param request: CreateWorkspaceResourceRequest + @return: CreateWorkspaceResourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_workspace_resource_with_options(workspace_id, request, headers, runtime) @@ -1946,6 +3557,12 @@ async def create_workspace_resource_async( workspace_id: str, request: aiwork_space_20210204_models.CreateWorkspaceResourceRequest, ) -> aiwork_space_20210204_models.CreateWorkspaceResourceResponse: + """ + @summary 创建资源 + + @param request: CreateWorkspaceResourceRequest + @return: CreateWorkspaceResourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_workspace_resource_with_options_async(workspace_id, request, headers, runtime) @@ -1956,6 
+3573,13 @@ def delete_code_source_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.DeleteCodeSourceResponse: + """ + @summary 删除一个代码源配置 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteCodeSourceResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1981,6 +3605,13 @@ async def delete_code_source_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.DeleteCodeSourceResponse: + """ + @summary 删除一个代码源配置 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteCodeSourceResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2004,6 +3635,11 @@ def delete_code_source( self, code_source_id: str, ) -> aiwork_space_20210204_models.DeleteCodeSourceResponse: + """ + @summary 删除一个代码源配置 + + @return: DeleteCodeSourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_code_source_with_options(code_source_id, headers, runtime) @@ -2012,25 +3648,36 @@ async def delete_code_source_async( self, code_source_id: str, ) -> aiwork_space_20210204_models.DeleteCodeSourceResponse: + """ + @summary 删除一个代码源配置 + + @return: DeleteCodeSourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_code_source_with_options_async(code_source_id, headers, runtime) - def delete_config_with_options( + def delete_collection_with_options( self, - workspace_id: str, - config_key: str, + collection_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteConfigResponse: + ) -> aiwork_space_20210204_models.DeleteCollectionResponse: + """ + @summary 删除Collection + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteCollectionResponse + """ req = 
open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='DeleteConfig', + action='DeleteCollection', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/configs/{OpenApiUtilClient.get_encode_param(config_key)}', + pathname=f'/api/v1/collections/{OpenApiUtilClient.get_encode_param(collection_name)}', method='DELETE', auth_type='AK', style='ROA', @@ -2038,25 +3685,31 @@ def delete_config_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteConfigResponse(), + aiwork_space_20210204_models.DeleteCollectionResponse(), self.call_api(params, req, runtime) ) - async def delete_config_with_options_async( + async def delete_collection_with_options_async( self, - workspace_id: str, - config_key: str, + collection_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteConfigResponse: + ) -> aiwork_space_20210204_models.DeleteCollectionResponse: + """ + @summary 删除Collection + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteCollectionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='DeleteConfig', + action='DeleteCollection', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/configs/{OpenApiUtilClient.get_encode_param(config_key)}', + pathname=f'/api/v1/collections/{OpenApiUtilClient.get_encode_param(collection_name)}', method='DELETE', auth_type='AK', style='ROA', @@ -2064,36 +3717,163 @@ async def delete_config_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteConfigResponse(), + aiwork_space_20210204_models.DeleteCollectionResponse(), await self.call_api_async(params, req, runtime) ) - def delete_config( + def delete_collection( 
self, - workspace_id: str, - config_key: str, - ) -> aiwork_space_20210204_models.DeleteConfigResponse: + collection_name: str, + ) -> aiwork_space_20210204_models.DeleteCollectionResponse: + """ + @summary 删除Collection + + @return: DeleteCollectionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.delete_config_with_options(workspace_id, config_key, headers, runtime) + return self.delete_collection_with_options(collection_name, headers, runtime) - async def delete_config_async( + async def delete_collection_async( self, - workspace_id: str, - config_key: str, - ) -> aiwork_space_20210204_models.DeleteConfigResponse: + collection_name: str, + ) -> aiwork_space_20210204_models.DeleteCollectionResponse: + """ + @summary 删除Collection + + @return: DeleteCollectionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.delete_config_with_options_async(workspace_id, config_key, headers, runtime) + return await self.delete_collection_with_options_async(collection_name, headers, runtime) - def delete_dataset_with_options( + def delete_config_with_options( self, - dataset_id: str, + workspace_id: str, + config_key: str, + request: aiwork_space_20210204_models.DeleteConfigRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteDatasetResponse: + ) -> aiwork_space_20210204_models.DeleteConfigResponse: + """ + @summary 删除配置 + + @param request: DeleteConfigRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteConfigResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='DeleteConfig', + version='2021-02-04', + protocol='HTTPS', + 
pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/configs/{OpenApiUtilClient.get_encode_param(config_key)}', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.DeleteConfigResponse(), + self.call_api(params, req, runtime) + ) + + async def delete_config_with_options_async( + self, + workspace_id: str, + config_key: str, + request: aiwork_space_20210204_models.DeleteConfigRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.DeleteConfigResponse: + """ + @summary 删除配置 + + @param request: DeleteConfigRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteConfigResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='DeleteConfig', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/configs/{OpenApiUtilClient.get_encode_param(config_key)}', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.DeleteConfigResponse(), + await self.call_api_async(params, req, runtime) + ) + + def delete_config( + self, + workspace_id: str, + config_key: str, + request: aiwork_space_20210204_models.DeleteConfigRequest, + ) -> aiwork_space_20210204_models.DeleteConfigResponse: + """ + @summary 删除配置 + + @param request: DeleteConfigRequest + @return: DeleteConfigResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.delete_config_with_options(workspace_id, config_key, request, headers, runtime) + + async def 
delete_config_async( + self, + workspace_id: str, + config_key: str, + request: aiwork_space_20210204_models.DeleteConfigRequest, + ) -> aiwork_space_20210204_models.DeleteConfigResponse: + """ + @summary 删除配置 + + @param request: DeleteConfigRequest + @return: DeleteConfigResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.delete_config_with_options_async(workspace_id, config_key, request, headers, runtime) + + def delete_dataset_with_options( + self, + dataset_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.DeleteDatasetResponse: + """ + @summary 删除数据集 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteDatasetResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers ) params = open_api_models.Params( action='DeleteDataset', @@ -2117,6 +3897,13 @@ async def delete_dataset_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.DeleteDatasetResponse: + """ + @summary 删除数据集 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteDatasetResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2140,6 +3927,11 @@ def delete_dataset( self, dataset_id: str, ) -> aiwork_space_20210204_models.DeleteDatasetResponse: + """ + @summary 删除数据集 + + @return: DeleteDatasetResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_dataset_with_options(dataset_id, headers, runtime) @@ -2148,6 +3940,11 @@ async def delete_dataset_async( self, dataset_id: str, ) -> aiwork_space_20210204_models.DeleteDatasetResponse: + """ + @summary 删除数据集 + + @return: DeleteDatasetResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_dataset_with_options_async(dataset_id, headers, runtime) @@ -2159,10 +3956,16 @@ def 
delete_dataset_labels_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.DeleteDatasetLabelsResponse: + """ + @summary 删除 Dataset 的标签 + + @param request: DeleteDatasetLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteDatasetLabelsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.keys): - query['Keys'] = request.keys if not UtilClient.is_unset(request.label_keys): query['LabelKeys'] = request.label_keys req = open_api_models.OpenApiRequest( @@ -2192,10 +3995,16 @@ async def delete_dataset_labels_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.DeleteDatasetLabelsResponse: + """ + @summary 删除 Dataset 的标签 + + @param request: DeleteDatasetLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteDatasetLabelsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.keys): - query['Keys'] = request.keys if not UtilClient.is_unset(request.label_keys): query['LabelKeys'] = request.label_keys req = open_api_models.OpenApiRequest( @@ -2223,6 +4032,12 @@ def delete_dataset_labels( dataset_id: str, request: aiwork_space_20210204_models.DeleteDatasetLabelsRequest, ) -> aiwork_space_20210204_models.DeleteDatasetLabelsResponse: + """ + @summary 删除 Dataset 的标签 + + @param request: DeleteDatasetLabelsRequest + @return: DeleteDatasetLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_dataset_labels_with_options(dataset_id, request, headers, runtime) @@ -2232,30 +4047,38 @@ async def delete_dataset_labels_async( dataset_id: str, request: aiwork_space_20210204_models.DeleteDatasetLabelsRequest, ) -> aiwork_space_20210204_models.DeleteDatasetLabelsResponse: + """ + @summary 删除 Dataset 的标签 + + @param request: 
DeleteDatasetLabelsRequest + @return: DeleteDatasetLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_dataset_labels_with_options_async(dataset_id, request, headers, runtime) - def delete_members_with_options( + def delete_dataset_version_with_options( self, - workspace_id: str, - request: aiwork_space_20210204_models.DeleteMembersRequest, + dataset_id: str, + version_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteMembersResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.member_ids): - query['MemberIds'] = request.member_ids + ) -> aiwork_space_20210204_models.DeleteDatasetVersionResponse: + """ + @summary 删除指定版本的数据集信息,如果删除的版本是该数据集的仅存版本,版本删除后会联动删除dataset 表中的数据集信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteDatasetVersionResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='DeleteMembers', + action='DeleteDatasetVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', method='DELETE', auth_type='AK', style='ROA', @@ -2263,30 +4086,32 @@ def delete_members_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteMembersResponse(), + aiwork_space_20210204_models.DeleteDatasetVersionResponse(), self.call_api(params, req, runtime) ) - async def delete_members_with_options_async( + async def delete_dataset_version_with_options_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.DeleteMembersRequest, + dataset_id: str, + version_name: str, headers: 
Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteMembersResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.member_ids): - query['MemberIds'] = request.member_ids + ) -> aiwork_space_20210204_models.DeleteDatasetVersionResponse: + """ + @summary 删除指定版本的数据集信息,如果删除的版本是该数据集的仅存版本,版本删除后会联动删除dataset 表中的数据集信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteDatasetVersionResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='DeleteMembers', + action='DeleteDatasetVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', method='DELETE', auth_type='AK', style='ROA', @@ -2294,42 +4119,67 @@ async def delete_members_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteMembersResponse(), + aiwork_space_20210204_models.DeleteDatasetVersionResponse(), await self.call_api_async(params, req, runtime) ) - def delete_members( + def delete_dataset_version( self, - workspace_id: str, - request: aiwork_space_20210204_models.DeleteMembersRequest, - ) -> aiwork_space_20210204_models.DeleteMembersResponse: + dataset_id: str, + version_name: str, + ) -> aiwork_space_20210204_models.DeleteDatasetVersionResponse: + """ + @summary 删除指定版本的数据集信息,如果删除的版本是该数据集的仅存版本,版本删除后会联动删除dataset 表中的数据集信息 + + @return: DeleteDatasetVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.delete_members_with_options(workspace_id, request, headers, runtime) + return self.delete_dataset_version_with_options(dataset_id, version_name, headers, runtime) 
- async def delete_members_async( + async def delete_dataset_version_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.DeleteMembersRequest, - ) -> aiwork_space_20210204_models.DeleteMembersResponse: + dataset_id: str, + version_name: str, + ) -> aiwork_space_20210204_models.DeleteDatasetVersionResponse: + """ + @summary 删除指定版本的数据集信息,如果删除的版本是该数据集的仅存版本,版本删除后会联动删除dataset 表中的数据集信息 + + @return: DeleteDatasetVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.delete_members_with_options_async(workspace_id, request, headers, runtime) + return await self.delete_dataset_version_with_options_async(dataset_id, version_name, headers, runtime) - def delete_model_with_options( + def delete_dataset_version_labels_with_options( self, - model_id: str, + dataset_id: str, + version_name: str, + request: aiwork_space_20210204_models.DeleteDatasetVersionLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelResponse: + ) -> aiwork_space_20210204_models.DeleteDatasetVersionLabelsResponse: + """ + @summary 删除数据集版本的标签。 + + @param request: DeleteDatasetVersionLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteDatasetVersionLabelsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.keys): + query['Keys'] = request.keys req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='DeleteModel', + action='DeleteDatasetVersionLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}/labels', method='DELETE', auth_type='AK', style='ROA', @@ 
-2337,24 +4187,39 @@ def delete_model_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelResponse(), + aiwork_space_20210204_models.DeleteDatasetVersionLabelsResponse(), self.call_api(params, req, runtime) ) - async def delete_model_with_options_async( + async def delete_dataset_version_labels_with_options_async( self, - model_id: str, + dataset_id: str, + version_name: str, + request: aiwork_space_20210204_models.DeleteDatasetVersionLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelResponse: + ) -> aiwork_space_20210204_models.DeleteDatasetVersionLabelsResponse: + """ + @summary 删除数据集版本的标签。 + + @param request: DeleteDatasetVersionLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteDatasetVersionLabelsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.keys): + query['Keys'] = request.keys req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='DeleteModel', + action='DeleteDatasetVersionLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}/labels', method='DELETE', auth_type='AK', style='ROA', @@ -2362,46 +4227,64 @@ async def delete_model_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelResponse(), + aiwork_space_20210204_models.DeleteDatasetVersionLabelsResponse(), await self.call_api_async(params, req, runtime) ) - def delete_model( + def delete_dataset_version_labels( self, - model_id: str, - ) -> aiwork_space_20210204_models.DeleteModelResponse: + dataset_id: 
str, + version_name: str, + request: aiwork_space_20210204_models.DeleteDatasetVersionLabelsRequest, + ) -> aiwork_space_20210204_models.DeleteDatasetVersionLabelsResponse: + """ + @summary 删除数据集版本的标签。 + + @param request: DeleteDatasetVersionLabelsRequest + @return: DeleteDatasetVersionLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.delete_model_with_options(model_id, headers, runtime) + return self.delete_dataset_version_labels_with_options(dataset_id, version_name, request, headers, runtime) - async def delete_model_async( + async def delete_dataset_version_labels_async( self, - model_id: str, - ) -> aiwork_space_20210204_models.DeleteModelResponse: + dataset_id: str, + version_name: str, + request: aiwork_space_20210204_models.DeleteDatasetVersionLabelsRequest, + ) -> aiwork_space_20210204_models.DeleteDatasetVersionLabelsResponse: + """ + @summary 删除数据集版本的标签。 + + @param request: DeleteDatasetVersionLabelsRequest + @return: DeleteDatasetVersionLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.delete_model_with_options_async(model_id, headers, runtime) + return await self.delete_dataset_version_labels_with_options_async(dataset_id, version_name, request, headers, + runtime) - def delete_model_domain_with_options( + def delete_experiment_with_options( self, - model_domain_id: str, - request: aiwork_space_20210204_models.DeleteModelDomainRequest, + experiment_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelDomainResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.model_task_ids): - query['ModelTaskIds'] = request.model_task_ids + ) -> aiwork_space_20210204_models.DeleteExperimentResponse: + """ + @summary 删除实验 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteExperimentResponse + """ req = 
open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='DeleteModelDomain', + action='DeleteExperiment', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/modeldomains/{OpenApiUtilClient.get_encode_param(model_domain_id)}', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}', method='DELETE', auth_type='AK', style='ROA', @@ -2409,30 +4292,31 @@ def delete_model_domain_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelDomainResponse(), + aiwork_space_20210204_models.DeleteExperimentResponse(), self.call_api(params, req, runtime) ) - async def delete_model_domain_with_options_async( + async def delete_experiment_with_options_async( self, - model_domain_id: str, - request: aiwork_space_20210204_models.DeleteModelDomainRequest, + experiment_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelDomainResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.model_task_ids): - query['ModelTaskIds'] = request.model_task_ids + ) -> aiwork_space_20210204_models.DeleteExperimentResponse: + """ + @summary 删除实验 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteExperimentResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='DeleteModelDomain', + action='DeleteExperiment', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/modeldomains/{OpenApiUtilClient.get_encode_param(model_domain_id)}', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}', method='DELETE', auth_type='AK', style='ROA', @@ -2440,50 +4324,58 @@ async def delete_model_domain_with_options_async( 
body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelDomainResponse(), + aiwork_space_20210204_models.DeleteExperimentResponse(), await self.call_api_async(params, req, runtime) ) - def delete_model_domain( + def delete_experiment( self, - model_domain_id: str, - request: aiwork_space_20210204_models.DeleteModelDomainRequest, - ) -> aiwork_space_20210204_models.DeleteModelDomainResponse: + experiment_id: str, + ) -> aiwork_space_20210204_models.DeleteExperimentResponse: + """ + @summary 删除实验 + + @return: DeleteExperimentResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.delete_model_domain_with_options(model_domain_id, request, headers, runtime) + return self.delete_experiment_with_options(experiment_id, headers, runtime) - async def delete_model_domain_async( + async def delete_experiment_async( self, - model_domain_id: str, - request: aiwork_space_20210204_models.DeleteModelDomainRequest, - ) -> aiwork_space_20210204_models.DeleteModelDomainResponse: + experiment_id: str, + ) -> aiwork_space_20210204_models.DeleteExperimentResponse: + """ + @summary 删除实验 + + @return: DeleteExperimentResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.delete_model_domain_with_options_async(model_domain_id, request, headers, runtime) + return await self.delete_experiment_with_options_async(experiment_id, headers, runtime) - def delete_model_labels_with_options( + def delete_experiment_label_with_options( self, - model_id: str, - request: aiwork_space_20210204_models.DeleteModelLabelsRequest, + experiment_id: str, + key: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelLabelsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.keys): - query['Keys'] = request.keys - if not UtilClient.is_unset(request.label_keys): - query['LabelKeys'] = request.label_keys + ) -> 
aiwork_space_20210204_models.DeleteExperimentLabelResponse: + """ + @summary 删除实验标签 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteExperimentLabelResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='DeleteModelLabels', + action='DeleteExperimentLabel', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/labels', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}/labels/{OpenApiUtilClient.get_encode_param(key)}', method='DELETE', auth_type='AK', style='ROA', @@ -2491,32 +4383,32 @@ def delete_model_labels_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelLabelsResponse(), + aiwork_space_20210204_models.DeleteExperimentLabelResponse(), self.call_api(params, req, runtime) ) - async def delete_model_labels_with_options_async( + async def delete_experiment_label_with_options_async( self, - model_id: str, - request: aiwork_space_20210204_models.DeleteModelLabelsRequest, + experiment_id: str, + key: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelLabelsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.keys): - query['Keys'] = request.keys - if not UtilClient.is_unset(request.label_keys): - query['LabelKeys'] = request.label_keys + ) -> aiwork_space_20210204_models.DeleteExperimentLabelResponse: + """ + @summary 删除实验标签 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteExperimentLabelResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='DeleteModelLabels', + 
action='DeleteExperimentLabel', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/labels', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}/labels/{OpenApiUtilClient.get_encode_param(key)}', method='DELETE', auth_type='AK', style='ROA', @@ -2524,43 +4416,66 @@ async def delete_model_labels_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelLabelsResponse(), + aiwork_space_20210204_models.DeleteExperimentLabelResponse(), await self.call_api_async(params, req, runtime) ) - def delete_model_labels( + def delete_experiment_label( self, - model_id: str, - request: aiwork_space_20210204_models.DeleteModelLabelsRequest, - ) -> aiwork_space_20210204_models.DeleteModelLabelsResponse: + experiment_id: str, + key: str, + ) -> aiwork_space_20210204_models.DeleteExperimentLabelResponse: + """ + @summary 删除实验标签 + + @return: DeleteExperimentLabelResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.delete_model_labels_with_options(model_id, request, headers, runtime) + return self.delete_experiment_label_with_options(experiment_id, key, headers, runtime) - async def delete_model_labels_async( + async def delete_experiment_label_async( self, - model_id: str, - request: aiwork_space_20210204_models.DeleteModelLabelsRequest, - ) -> aiwork_space_20210204_models.DeleteModelLabelsResponse: + experiment_id: str, + key: str, + ) -> aiwork_space_20210204_models.DeleteExperimentLabelResponse: + """ + @summary 删除实验标签 + + @return: DeleteExperimentLabelResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.delete_model_labels_with_options_async(model_id, request, headers, runtime) + return await self.delete_experiment_label_with_options_async(experiment_id, key, headers, runtime) - def delete_model_version_with_options( + def delete_members_with_options( self, - model_id: str, - 
version_name: str, + workspace_id: str, + request: aiwork_space_20210204_models.DeleteMembersRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelVersionResponse: + ) -> aiwork_space_20210204_models.DeleteMembersResponse: + """ + @summary 删除工作空间成员 + + @param request: DeleteMembersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteMembersResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.member_ids): + query['MemberIds'] = request.member_ids req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='DeleteModelVersion', + action='DeleteMembers', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members', method='DELETE', auth_type='AK', style='ROA', @@ -2568,25 +4483,38 @@ def delete_model_version_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelVersionResponse(), + aiwork_space_20210204_models.DeleteMembersResponse(), self.call_api(params, req, runtime) ) - async def delete_model_version_with_options_async( + async def delete_members_with_options_async( self, - model_id: str, - version_name: str, + workspace_id: str, + request: aiwork_space_20210204_models.DeleteMembersRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelVersionResponse: + ) -> aiwork_space_20210204_models.DeleteMembersResponse: + """ + @summary 删除工作空间成员 + + @param request: DeleteMembersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: 
DeleteMembersResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.member_ids): + query['MemberIds'] = request.member_ids req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='DeleteModelVersion', + action='DeleteMembers', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members', method='DELETE', auth_type='AK', style='ROA', @@ -2594,51 +4522,61 @@ async def delete_model_version_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelVersionResponse(), + aiwork_space_20210204_models.DeleteMembersResponse(), await self.call_api_async(params, req, runtime) ) - def delete_model_version( + def delete_members( self, - model_id: str, - version_name: str, - ) -> aiwork_space_20210204_models.DeleteModelVersionResponse: + workspace_id: str, + request: aiwork_space_20210204_models.DeleteMembersRequest, + ) -> aiwork_space_20210204_models.DeleteMembersResponse: + """ + @summary 删除工作空间成员 + + @param request: DeleteMembersRequest + @return: DeleteMembersResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.delete_model_version_with_options(model_id, version_name, headers, runtime) + return self.delete_members_with_options(workspace_id, request, headers, runtime) - async def delete_model_version_async( + async def delete_members_async( self, - model_id: str, - version_name: str, - ) -> aiwork_space_20210204_models.DeleteModelVersionResponse: + workspace_id: str, + request: aiwork_space_20210204_models.DeleteMembersRequest, + ) -> aiwork_space_20210204_models.DeleteMembersResponse: + """ + @summary 删除工作空间成员 + + @param request: DeleteMembersRequest 
+ @return: DeleteMembersResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.delete_model_version_with_options_async(model_id, version_name, headers, runtime) + return await self.delete_members_with_options_async(workspace_id, request, headers, runtime) - def delete_model_version_labels_with_options( + def delete_model_with_options( self, model_id: str, - version_name: str, - request: aiwork_space_20210204_models.DeleteModelVersionLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelVersionLabelsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.keys): - query['Keys'] = request.keys - if not UtilClient.is_unset(request.label_keys): - query['LabelKeys'] = request.label_keys + ) -> aiwork_space_20210204_models.DeleteModelResponse: + """ + @summary 删除模型 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='DeleteModelVersionLabels', + action='DeleteModel', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}/labels', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}', method='DELETE', auth_type='AK', style='ROA', @@ -2646,33 +4584,31 @@ def delete_model_version_labels_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelVersionLabelsResponse(), + aiwork_space_20210204_models.DeleteModelResponse(), self.call_api(params, req, runtime) ) - async def delete_model_version_labels_with_options_async( + async def delete_model_with_options_async( self, model_id: str, - version_name: str, - request: 
aiwork_space_20210204_models.DeleteModelVersionLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteModelVersionLabelsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.keys): - query['Keys'] = request.keys - if not UtilClient.is_unset(request.label_keys): - query['LabelKeys'] = request.label_keys + ) -> aiwork_space_20210204_models.DeleteModelResponse: + """ + @summary 删除模型 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='DeleteModelVersionLabels', + action='DeleteModel', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}/labels', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}', method='DELETE', auth_type='AK', style='ROA', @@ -2680,44 +4616,64 @@ async def delete_model_version_labels_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteModelVersionLabelsResponse(), + aiwork_space_20210204_models.DeleteModelResponse(), await self.call_api_async(params, req, runtime) ) - def delete_model_version_labels( + def delete_model( self, model_id: str, - version_name: str, - request: aiwork_space_20210204_models.DeleteModelVersionLabelsRequest, - ) -> aiwork_space_20210204_models.DeleteModelVersionLabelsResponse: + ) -> aiwork_space_20210204_models.DeleteModelResponse: + """ + @summary 删除模型 + + @return: DeleteModelResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.delete_model_version_labels_with_options(model_id, version_name, request, headers, runtime) + return self.delete_model_with_options(model_id, 
headers, runtime) - async def delete_model_version_labels_async( + async def delete_model_async( self, model_id: str, - version_name: str, - request: aiwork_space_20210204_models.DeleteModelVersionLabelsRequest, - ) -> aiwork_space_20210204_models.DeleteModelVersionLabelsResponse: + ) -> aiwork_space_20210204_models.DeleteModelResponse: + """ + @summary 删除模型 + + @return: DeleteModelResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.delete_model_version_labels_with_options_async(model_id, version_name, request, headers, runtime) + return await self.delete_model_with_options_async(model_id, headers, runtime) - def delete_workspace_with_options( + def delete_model_domain_with_options( self, - workspace_id: str, + model_domain_id: str, + request: aiwork_space_20210204_models.DeleteModelDomainRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteWorkspaceResponse: + ) -> aiwork_space_20210204_models.DeleteModelDomainResponse: + """ + @summary 删除模型领域 + + @param request: DeleteModelDomainRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelDomainResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.model_task_ids): + query['ModelTaskIds'] = request.model_task_ids req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='DeleteWorkspace', + action='DeleteModelDomain', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}', + pathname=f'/api/v1/modeldomains/{OpenApiUtilClient.get_encode_param(model_domain_id)}', method='DELETE', auth_type='AK', style='ROA', @@ -2725,24 +4681,38 @@ def delete_workspace_with_options( body_type='json' ) return TeaCore.from_map( - 
aiwork_space_20210204_models.DeleteWorkspaceResponse(), + aiwork_space_20210204_models.DeleteModelDomainResponse(), self.call_api(params, req, runtime) ) - async def delete_workspace_with_options_async( + async def delete_model_domain_with_options_async( self, - workspace_id: str, + model_domain_id: str, + request: aiwork_space_20210204_models.DeleteModelDomainRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteWorkspaceResponse: + ) -> aiwork_space_20210204_models.DeleteModelDomainResponse: + """ + @summary 删除模型领域 + + @param request: DeleteModelDomainRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelDomainResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.model_task_ids): + query['ModelTaskIds'] = request.model_task_ids req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='DeleteWorkspace', + action='DeleteModelDomain', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}', + pathname=f'/api/v1/modeldomains/{OpenApiUtilClient.get_encode_param(model_domain_id)}', method='DELETE', auth_type='AK', style='ROA', @@ -2750,52 +4720,70 @@ async def delete_workspace_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteWorkspaceResponse(), + aiwork_space_20210204_models.DeleteModelDomainResponse(), await self.call_api_async(params, req, runtime) ) - def delete_workspace( + def delete_model_domain( self, - workspace_id: str, - ) -> aiwork_space_20210204_models.DeleteWorkspaceResponse: + model_domain_id: str, + request: aiwork_space_20210204_models.DeleteModelDomainRequest, + ) -> aiwork_space_20210204_models.DeleteModelDomainResponse: + """ + @summary 删除模型领域 + + @param request: 
DeleteModelDomainRequest + @return: DeleteModelDomainResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.delete_workspace_with_options(workspace_id, headers, runtime) + return self.delete_model_domain_with_options(model_domain_id, request, headers, runtime) - async def delete_workspace_async( + async def delete_model_domain_async( self, - workspace_id: str, - ) -> aiwork_space_20210204_models.DeleteWorkspaceResponse: + model_domain_id: str, + request: aiwork_space_20210204_models.DeleteModelDomainRequest, + ) -> aiwork_space_20210204_models.DeleteModelDomainResponse: + """ + @summary 删除模型领域 + + @param request: DeleteModelDomainRequest + @return: DeleteModelDomainResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.delete_workspace_with_options_async(workspace_id, headers, runtime) + return await self.delete_model_domain_with_options_async(model_domain_id, request, headers, runtime) - def delete_workspace_resource_with_options( + def delete_model_labels_with_options( self, - workspace_id: str, - request: aiwork_space_20210204_models.DeleteWorkspaceResourceRequest, + model_id: str, + request: aiwork_space_20210204_models.DeleteModelLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteWorkspaceResourceResponse: + ) -> aiwork_space_20210204_models.DeleteModelLabelsResponse: + """ + @summary 删除模型的标签 + + @param request: DeleteModelLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelLabelsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.group_name): - query['GroupName'] = request.group_name - if not UtilClient.is_unset(request.option): - query['Option'] = request.option - if not UtilClient.is_unset(request.product_type): - query['ProductType'] = request.product_type - if not UtilClient.is_unset(request.resource_type): - 
query['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.keys): + query['Keys'] = request.keys + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='DeleteWorkspaceResource', + action='DeleteModelLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/resources', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/labels', method='DELETE', auth_type='AK', style='ROA', @@ -2803,36 +4791,40 @@ def delete_workspace_resource_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteWorkspaceResourceResponse(), + aiwork_space_20210204_models.DeleteModelLabelsResponse(), self.call_api(params, req, runtime) ) - async def delete_workspace_resource_with_options_async( + async def delete_model_labels_with_options_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.DeleteWorkspaceResourceRequest, + model_id: str, + request: aiwork_space_20210204_models.DeleteModelLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.DeleteWorkspaceResourceResponse: + ) -> aiwork_space_20210204_models.DeleteModelLabelsResponse: + """ + @summary 删除模型的标签 + + @param request: DeleteModelLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelLabelsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.group_name): - query['GroupName'] = request.group_name - if not UtilClient.is_unset(request.option): - query['Option'] = request.option - if not UtilClient.is_unset(request.product_type): - query['ProductType'] = request.product_type - if not 
UtilClient.is_unset(request.resource_type): - query['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.keys): + query['Keys'] = request.keys + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='DeleteWorkspaceResource', + action='DeleteModelLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/resources', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/labels', method='DELETE', auth_type='AK', style='ROA', @@ -2840,638 +4832,915 @@ async def delete_workspace_resource_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.DeleteWorkspaceResourceResponse(), + aiwork_space_20210204_models.DeleteModelLabelsResponse(), await self.call_api_async(params, req, runtime) ) - def delete_workspace_resource( + def delete_model_labels( self, - workspace_id: str, - request: aiwork_space_20210204_models.DeleteWorkspaceResourceRequest, - ) -> aiwork_space_20210204_models.DeleteWorkspaceResourceResponse: - runtime = util_models.RuntimeOptions() - headers = {} - return self.delete_workspace_resource_with_options(workspace_id, request, headers, runtime) + model_id: str, + request: aiwork_space_20210204_models.DeleteModelLabelsRequest, + ) -> aiwork_space_20210204_models.DeleteModelLabelsResponse: + """ + @summary 删除模型的标签 - async def delete_workspace_resource_async( + @param request: DeleteModelLabelsRequest + @return: DeleteModelLabelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.delete_model_labels_with_options(model_id, request, headers, runtime) + + async def delete_model_labels_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.DeleteWorkspaceResourceRequest, - ) -> 
aiwork_space_20210204_models.DeleteWorkspaceResourceResponse: + model_id: str, + request: aiwork_space_20210204_models.DeleteModelLabelsRequest, + ) -> aiwork_space_20210204_models.DeleteModelLabelsResponse: + """ + @summary 删除模型的标签 + + @param request: DeleteModelLabelsRequest + @return: DeleteModelLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.delete_workspace_resource_with_options_async(workspace_id, request, headers, runtime) + return await self.delete_model_labels_with_options_async(model_id, request, headers, runtime) - def get_code_source_with_options( + def delete_model_version_with_options( self, - code_source_id: str, + model_id: str, + version_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetCodeSourceResponse: + ) -> aiwork_space_20210204_models.DeleteModelVersionResponse: + """ + @summary 删除模型版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='GetCodeSource', + action='DeleteModelVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/codesources/{OpenApiUtilClient.get_encode_param(code_source_id)}', - method='GET', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetCodeSourceResponse(), + aiwork_space_20210204_models.DeleteModelVersionResponse(), self.call_api(params, req, runtime) ) - async def get_code_source_with_options_async( + async def delete_model_version_with_options_async( self, - code_source_id: str, + model_id: str, + version_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> 
aiwork_space_20210204_models.GetCodeSourceResponse: + ) -> aiwork_space_20210204_models.DeleteModelVersionResponse: + """ + @summary 删除模型版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='GetCodeSource', + action='DeleteModelVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/codesources/{OpenApiUtilClient.get_encode_param(code_source_id)}', - method='GET', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetCodeSourceResponse(), + aiwork_space_20210204_models.DeleteModelVersionResponse(), await self.call_api_async(params, req, runtime) ) - def get_code_source( + def delete_model_version( self, - code_source_id: str, - ) -> aiwork_space_20210204_models.GetCodeSourceResponse: + model_id: str, + version_name: str, + ) -> aiwork_space_20210204_models.DeleteModelVersionResponse: + """ + @summary 删除模型版本 + + @return: DeleteModelVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_code_source_with_options(code_source_id, headers, runtime) + return self.delete_model_version_with_options(model_id, version_name, headers, runtime) - async def get_code_source_async( + async def delete_model_version_async( self, - code_source_id: str, - ) -> aiwork_space_20210204_models.GetCodeSourceResponse: + model_id: str, + version_name: str, + ) -> aiwork_space_20210204_models.DeleteModelVersionResponse: + """ + @summary 删除模型版本 + + @return: DeleteModelVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_code_source_with_options_async(code_source_id, headers, runtime) + return 
await self.delete_model_version_with_options_async(model_id, version_name, headers, runtime) - def get_code_sources_statistics_with_options( + def delete_model_version_labels_with_options( self, - request: aiwork_space_20210204_models.GetCodeSourcesStatisticsRequest, + model_id: str, + version_name: str, + request: aiwork_space_20210204_models.DeleteModelVersionLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse: + ) -> aiwork_space_20210204_models.DeleteModelVersionLabelsResponse: + """ + @summary 删除模型版本的标签 + + @param request: DeleteModelVersionLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelVersionLabelsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.keys): + query['Keys'] = request.keys + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetCodeSourcesStatistics', + action='DeleteModelVersionLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/statistics/codesources', - method='GET', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}/labels', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse(), + aiwork_space_20210204_models.DeleteModelVersionLabelsResponse(), self.call_api(params, req, runtime) ) - async def get_code_sources_statistics_with_options_async( + async def delete_model_version_labels_with_options_async( self, - request: 
aiwork_space_20210204_models.GetCodeSourcesStatisticsRequest, + model_id: str, + version_name: str, + request: aiwork_space_20210204_models.DeleteModelVersionLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse: + ) -> aiwork_space_20210204_models.DeleteModelVersionLabelsResponse: + """ + @summary 删除模型版本的标签 + + @param request: DeleteModelVersionLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteModelVersionLabelsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.keys): + query['Keys'] = request.keys + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetCodeSourcesStatistics', + action='DeleteModelVersionLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/statistics/codesources', - method='GET', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}/labels', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse(), + aiwork_space_20210204_models.DeleteModelVersionLabelsResponse(), await self.call_api_async(params, req, runtime) ) - def get_code_sources_statistics( + def delete_model_version_labels( self, - request: aiwork_space_20210204_models.GetCodeSourcesStatisticsRequest, - ) -> aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse: + model_id: str, + version_name: str, + request: aiwork_space_20210204_models.DeleteModelVersionLabelsRequest, + ) -> 
aiwork_space_20210204_models.DeleteModelVersionLabelsResponse: + """ + @summary 删除模型版本的标签 + + @param request: DeleteModelVersionLabelsRequest + @return: DeleteModelVersionLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_code_sources_statistics_with_options(request, headers, runtime) + return self.delete_model_version_labels_with_options(model_id, version_name, request, headers, runtime) - async def get_code_sources_statistics_async( + async def delete_model_version_labels_async( self, - request: aiwork_space_20210204_models.GetCodeSourcesStatisticsRequest, - ) -> aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse: + model_id: str, + version_name: str, + request: aiwork_space_20210204_models.DeleteModelVersionLabelsRequest, + ) -> aiwork_space_20210204_models.DeleteModelVersionLabelsResponse: + """ + @summary 删除模型版本的标签 + + @param request: DeleteModelVersionLabelsRequest + @return: DeleteModelVersionLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_code_sources_statistics_with_options_async(request, headers, runtime) + return await self.delete_model_version_labels_with_options_async(model_id, version_name, request, headers, + runtime) - def get_dataset_with_options( + def delete_service_template_with_options( self, - dataset_id: str, + service_template_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetDatasetResponse: + ) -> aiwork_space_20210204_models.DeleteServiceTemplateResponse: + """ + @summary 删除服务模版 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteServiceTemplateResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='GetDataset', + action='DeleteServiceTemplate', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}', - 
method='GET', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetDatasetResponse(), + aiwork_space_20210204_models.DeleteServiceTemplateResponse(), self.call_api(params, req, runtime) ) - async def get_dataset_with_options_async( + async def delete_service_template_with_options_async( self, - dataset_id: str, + service_template_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetDatasetResponse: + ) -> aiwork_space_20210204_models.DeleteServiceTemplateResponse: + """ + @summary 删除服务模版 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteServiceTemplateResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='GetDataset', + action='DeleteServiceTemplate', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}', - method='GET', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetDatasetResponse(), + aiwork_space_20210204_models.DeleteServiceTemplateResponse(), await self.call_api_async(params, req, runtime) ) - def get_dataset( + def delete_service_template( self, - dataset_id: str, - ) -> aiwork_space_20210204_models.GetDatasetResponse: + service_template_id: str, + ) -> aiwork_space_20210204_models.DeleteServiceTemplateResponse: + """ + @summary 删除服务模版 + + @return: DeleteServiceTemplateResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_dataset_with_options(dataset_id, headers, runtime) + return 
self.delete_service_template_with_options(service_template_id, headers, runtime) - async def get_dataset_async( + async def delete_service_template_async( self, - dataset_id: str, - ) -> aiwork_space_20210204_models.GetDatasetResponse: + service_template_id: str, + ) -> aiwork_space_20210204_models.DeleteServiceTemplateResponse: + """ + @summary 删除服务模版 + + @return: DeleteServiceTemplateResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_dataset_with_options_async(dataset_id, headers, runtime) + return await self.delete_service_template_with_options_async(service_template_id, headers, runtime) - def get_datasets_statistics_with_options( + def delete_service_template_labels_with_options( self, - request: aiwork_space_20210204_models.GetDatasetsStatisticsRequest, + service_template_id: str, + request: aiwork_space_20210204_models.DeleteServiceTemplateLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetDatasetsStatisticsResponse: + ) -> aiwork_space_20210204_models.DeleteServiceTemplateLabelsResponse: + """ + @summary 删除服务模版的标签 + + @param request: DeleteServiceTemplateLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteServiceTemplateLabelsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetDatasetsStatistics', + action='DeleteServiceTemplateLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/statistics/datasets', - method='GET', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}/labels', + method='DELETE', 
auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetDatasetsStatisticsResponse(), + aiwork_space_20210204_models.DeleteServiceTemplateLabelsResponse(), self.call_api(params, req, runtime) ) - async def get_datasets_statistics_with_options_async( + async def delete_service_template_labels_with_options_async( self, - request: aiwork_space_20210204_models.GetDatasetsStatisticsRequest, + service_template_id: str, + request: aiwork_space_20210204_models.DeleteServiceTemplateLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetDatasetsStatisticsResponse: + ) -> aiwork_space_20210204_models.DeleteServiceTemplateLabelsResponse: + """ + @summary 删除服务模版的标签 + + @param request: DeleteServiceTemplateLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteServiceTemplateLabelsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetDatasetsStatistics', + action='DeleteServiceTemplateLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/statistics/datasets', - method='GET', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}/labels', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetDatasetsStatisticsResponse(), + aiwork_space_20210204_models.DeleteServiceTemplateLabelsResponse(), await self.call_api_async(params, req, runtime) ) - def get_datasets_statistics( + def delete_service_template_labels( 
self, - request: aiwork_space_20210204_models.GetDatasetsStatisticsRequest, - ) -> aiwork_space_20210204_models.GetDatasetsStatisticsResponse: + service_template_id: str, + request: aiwork_space_20210204_models.DeleteServiceTemplateLabelsRequest, + ) -> aiwork_space_20210204_models.DeleteServiceTemplateLabelsResponse: + """ + @summary 删除服务模版的标签 + + @param request: DeleteServiceTemplateLabelsRequest + @return: DeleteServiceTemplateLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_datasets_statistics_with_options(request, headers, runtime) + return self.delete_service_template_labels_with_options(service_template_id, request, headers, runtime) - async def get_datasets_statistics_async( + async def delete_service_template_labels_async( self, - request: aiwork_space_20210204_models.GetDatasetsStatisticsRequest, - ) -> aiwork_space_20210204_models.GetDatasetsStatisticsResponse: + service_template_id: str, + request: aiwork_space_20210204_models.DeleteServiceTemplateLabelsRequest, + ) -> aiwork_space_20210204_models.DeleteServiceTemplateLabelsResponse: + """ + @summary 删除服务模版的标签 + + @param request: DeleteServiceTemplateLabelsRequest + @return: DeleteServiceTemplateLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_datasets_statistics_with_options_async(request, headers, runtime) + return await self.delete_service_template_labels_with_options_async(service_template_id, request, headers, + runtime) - def get_default_workspace_with_options( + def delete_user_config_with_options( self, - request: aiwork_space_20210204_models.GetDefaultWorkspaceRequest, + category_name: str, + request: aiwork_space_20210204_models.DeleteUserConfigRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetDefaultWorkspaceResponse: + ) -> aiwork_space_20210204_models.DeleteUserConfigResponse: + """ + @summary 删除用户配置 + + @param request: DeleteUserConfigRequest 
+ @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteUserConfigResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose + if not UtilClient.is_unset(request.config_key): + query['ConfigKey'] = request.config_key req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetDefaultWorkspace', + action='DeleteUserConfig', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/defaultWorkspaces', - method='GET', + pathname=f'/api/v1/userconfigs/{OpenApiUtilClient.get_encode_param(category_name)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetDefaultWorkspaceResponse(), + aiwork_space_20210204_models.DeleteUserConfigResponse(), self.call_api(params, req, runtime) ) - async def get_default_workspace_with_options_async( + async def delete_user_config_with_options_async( self, - request: aiwork_space_20210204_models.GetDefaultWorkspaceRequest, + category_name: str, + request: aiwork_space_20210204_models.DeleteUserConfigRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetDefaultWorkspaceResponse: + ) -> aiwork_space_20210204_models.DeleteUserConfigResponse: + """ + @summary 删除用户配置 + + @param request: DeleteUserConfigRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteUserConfigResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose + if not UtilClient.is_unset(request.config_key): + query['ConfigKey'] = request.config_key req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( 
- action='GetDefaultWorkspace', + action='DeleteUserConfig', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/defaultWorkspaces', - method='GET', + pathname=f'/api/v1/userconfigs/{OpenApiUtilClient.get_encode_param(category_name)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetDefaultWorkspaceResponse(), + aiwork_space_20210204_models.DeleteUserConfigResponse(), await self.call_api_async(params, req, runtime) ) - def get_default_workspace( + def delete_user_config( self, - request: aiwork_space_20210204_models.GetDefaultWorkspaceRequest, - ) -> aiwork_space_20210204_models.GetDefaultWorkspaceResponse: + category_name: str, + request: aiwork_space_20210204_models.DeleteUserConfigRequest, + ) -> aiwork_space_20210204_models.DeleteUserConfigResponse: + """ + @summary 删除用户配置 + + @param request: DeleteUserConfigRequest + @return: DeleteUserConfigResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_default_workspace_with_options(request, headers, runtime) + return self.delete_user_config_with_options(category_name, request, headers, runtime) - async def get_default_workspace_async( + async def delete_user_config_async( self, - request: aiwork_space_20210204_models.GetDefaultWorkspaceRequest, - ) -> aiwork_space_20210204_models.GetDefaultWorkspaceResponse: + category_name: str, + request: aiwork_space_20210204_models.DeleteUserConfigRequest, + ) -> aiwork_space_20210204_models.DeleteUserConfigResponse: + """ + @summary 删除用户配置 + + @param request: DeleteUserConfigRequest + @return: DeleteUserConfigResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_default_workspace_with_options_async(request, headers, runtime) + return await self.delete_user_config_with_options_async(category_name, request, headers, runtime) - def get_image_with_options( + def delete_workspace_with_options( self, - 
image_id: str, - request: aiwork_space_20210204_models.GetImageRequest, + workspace_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetImageResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose + ) -> aiwork_space_20210204_models.DeleteWorkspaceResponse: + """ + @summary 删除工作空间 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteWorkspaceResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='GetImage', + action='DeleteWorkspace', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}', - method='GET', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetImageResponse(), + aiwork_space_20210204_models.DeleteWorkspaceResponse(), self.call_api(params, req, runtime) ) - async def get_image_with_options_async( + async def delete_workspace_with_options_async( self, - image_id: str, - request: aiwork_space_20210204_models.GetImageRequest, + workspace_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetImageResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose + ) -> aiwork_space_20210204_models.DeleteWorkspaceResponse: + """ + @summary 删除工作空间 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteWorkspaceResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + 
headers=headers ) params = open_api_models.Params( - action='GetImage', + action='DeleteWorkspace', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}', - method='GET', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetImageResponse(), + aiwork_space_20210204_models.DeleteWorkspaceResponse(), await self.call_api_async(params, req, runtime) ) - def get_image( + def delete_workspace( self, - image_id: str, - request: aiwork_space_20210204_models.GetImageRequest, - ) -> aiwork_space_20210204_models.GetImageResponse: + workspace_id: str, + ) -> aiwork_space_20210204_models.DeleteWorkspaceResponse: + """ + @summary 删除工作空间 + + @return: DeleteWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_image_with_options(image_id, request, headers, runtime) + return self.delete_workspace_with_options(workspace_id, headers, runtime) - async def get_image_async( + async def delete_workspace_async( self, - image_id: str, - request: aiwork_space_20210204_models.GetImageRequest, - ) -> aiwork_space_20210204_models.GetImageResponse: + workspace_id: str, + ) -> aiwork_space_20210204_models.DeleteWorkspaceResponse: + """ + @summary 删除工作空间 + + @return: DeleteWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_image_with_options_async(image_id, request, headers, runtime) + return await self.delete_workspace_with_options_async(workspace_id, headers, runtime) - def get_images_statistics_with_options( + def delete_workspace_resource_with_options( self, - request: aiwork_space_20210204_models.GetImagesStatisticsRequest, + workspace_id: str, + request: aiwork_space_20210204_models.DeleteWorkspaceResourceRequest, headers: Dict[str, str], runtime: 
util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetImagesStatisticsResponse: + ) -> aiwork_space_20210204_models.DeleteWorkspaceResourceResponse: + """ + @summary 删除工作空间资源 + + @param request: DeleteWorkspaceResourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteWorkspaceResourceResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.group_name): + query['GroupName'] = request.group_name + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if not UtilClient.is_unset(request.product_type): + query['ProductType'] = request.product_type + if not UtilClient.is_unset(request.resource_ids): + query['ResourceIds'] = request.resource_ids + if not UtilClient.is_unset(request.resource_type): + query['ResourceType'] = request.resource_type req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetImagesStatistics', + action='DeleteWorkspaceResource', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/statistics/images', - method='GET', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/resources', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetImagesStatisticsResponse(), + aiwork_space_20210204_models.DeleteWorkspaceResourceResponse(), self.call_api(params, req, runtime) ) - async def get_images_statistics_with_options_async( + async def delete_workspace_resource_with_options_async( self, - request: aiwork_space_20210204_models.GetImagesStatisticsRequest, + workspace_id: str, + request: 
aiwork_space_20210204_models.DeleteWorkspaceResourceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetImagesStatisticsResponse: + ) -> aiwork_space_20210204_models.DeleteWorkspaceResourceResponse: + """ + @summary 删除工作空间资源 + + @param request: DeleteWorkspaceResourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteWorkspaceResourceResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.group_name): + query['GroupName'] = request.group_name + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if not UtilClient.is_unset(request.product_type): + query['ProductType'] = request.product_type + if not UtilClient.is_unset(request.resource_ids): + query['ResourceIds'] = request.resource_ids + if not UtilClient.is_unset(request.resource_type): + query['ResourceType'] = request.resource_type req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetImagesStatistics', + action='DeleteWorkspaceResource', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/statistics/images', - method='GET', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/resources', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetImagesStatisticsResponse(), + aiwork_space_20210204_models.DeleteWorkspaceResourceResponse(), await self.call_api_async(params, req, runtime) ) - def get_images_statistics( + def delete_workspace_resource( self, - request: aiwork_space_20210204_models.GetImagesStatisticsRequest, - ) -> 
aiwork_space_20210204_models.GetImagesStatisticsResponse: + workspace_id: str, + request: aiwork_space_20210204_models.DeleteWorkspaceResourceRequest, + ) -> aiwork_space_20210204_models.DeleteWorkspaceResourceResponse: + """ + @summary 删除工作空间资源 + + @param request: DeleteWorkspaceResourceRequest + @return: DeleteWorkspaceResourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_images_statistics_with_options(request, headers, runtime) + return self.delete_workspace_resource_with_options(workspace_id, request, headers, runtime) - async def get_images_statistics_async( + async def delete_workspace_resource_async( self, - request: aiwork_space_20210204_models.GetImagesStatisticsRequest, - ) -> aiwork_space_20210204_models.GetImagesStatisticsResponse: + workspace_id: str, + request: aiwork_space_20210204_models.DeleteWorkspaceResourceRequest, + ) -> aiwork_space_20210204_models.DeleteWorkspaceResourceResponse: + """ + @summary 删除工作空间资源 + + @param request: DeleteWorkspaceResourceRequest + @return: DeleteWorkspaceResourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_images_statistics_with_options_async(request, headers, runtime) + return await self.delete_workspace_resource_with_options_async(workspace_id, request, headers, runtime) - def get_member_with_options( + def delete_workspace_roles_with_options( self, workspace_id: str, - request: aiwork_space_20210204_models.GetMemberRequest, + request: aiwork_space_20210204_models.DeleteWorkspaceRolesRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetMemberResponse: + ) -> aiwork_space_20210204_models.DeleteWorkspaceRolesResponse: + """ + @summary 批量删除工作空间角色 + + @param request: DeleteWorkspaceRolesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteWorkspaceRolesResponse + """ UtilClient.validate_model(request) - query = {} - if 
not UtilClient.is_unset(request.user_id): - query['UserId'] = request.user_id + body = {} + if not UtilClient.is_unset(request.role_ids): + body['RoleIds'] = request.role_ids req = open_api_models.OpenApiRequest( headers=headers, - query=OpenApiUtilClient.query(query) + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='GetMember', + action='DeleteWorkspaceRoles', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/member', - method='GET', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/roles/action/delete', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetMemberResponse(), + aiwork_space_20210204_models.DeleteWorkspaceRolesResponse(), self.call_api(params, req, runtime) ) - async def get_member_with_options_async( + async def delete_workspace_roles_with_options_async( self, workspace_id: str, - request: aiwork_space_20210204_models.GetMemberRequest, + request: aiwork_space_20210204_models.DeleteWorkspaceRolesRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetMemberResponse: + ) -> aiwork_space_20210204_models.DeleteWorkspaceRolesResponse: + """ + @summary 批量删除工作空间角色 + + @param request: DeleteWorkspaceRolesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteWorkspaceRolesResponse + """ UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.user_id): - query['UserId'] = request.user_id + body = {} + if not UtilClient.is_unset(request.role_ids): + body['RoleIds'] = request.role_ids req = open_api_models.OpenApiRequest( headers=headers, - query=OpenApiUtilClient.query(query) + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='GetMember', + 
action='DeleteWorkspaceRoles', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/member', - method='GET', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/roles/action/delete', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetMemberResponse(), + aiwork_space_20210204_models.DeleteWorkspaceRolesResponse(), await self.call_api_async(params, req, runtime) ) - def get_member( + def delete_workspace_roles( self, workspace_id: str, - request: aiwork_space_20210204_models.GetMemberRequest, - ) -> aiwork_space_20210204_models.GetMemberResponse: + request: aiwork_space_20210204_models.DeleteWorkspaceRolesRequest, + ) -> aiwork_space_20210204_models.DeleteWorkspaceRolesResponse: + """ + @summary 批量删除工作空间角色 + + @param request: DeleteWorkspaceRolesRequest + @return: DeleteWorkspaceRolesResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_member_with_options(workspace_id, request, headers, runtime) + return self.delete_workspace_roles_with_options(workspace_id, request, headers, runtime) - async def get_member_async( + async def delete_workspace_roles_async( self, workspace_id: str, - request: aiwork_space_20210204_models.GetMemberRequest, - ) -> aiwork_space_20210204_models.GetMemberResponse: + request: aiwork_space_20210204_models.DeleteWorkspaceRolesRequest, + ) -> aiwork_space_20210204_models.DeleteWorkspaceRolesResponse: + """ + @summary 批量删除工作空间角色 + + @param request: DeleteWorkspaceRolesRequest + @return: DeleteWorkspaceRolesResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_member_with_options_async(workspace_id, request, headers, runtime) + return await self.delete_workspace_roles_with_options_async(workspace_id, request, headers, runtime) - def get_model_with_options( + def 
describe_pricing_module_with_options( self, - model_id: str, + request: aiwork_space_20210204_models.DescribePricingModuleRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetModelResponse: + ) -> aiwork_space_20210204_models.DescribePricingModuleResponse: + """ + @summary 查询阿里云商品对应模块信息 + + @param request: DescribePricingModuleRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DescribePricingModuleResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.product_code): + query['ProductCode'] = request.product_code + if not UtilClient.is_unset(request.product_type): + query['ProductType'] = request.product_type + if not UtilClient.is_unset(request.subscription_type): + query['SubscriptionType'] = request.subscription_type req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetModel', + action='DescribePricingModule', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}', + pathname=f'/api/v1/proxy/describepricingmodule', method='GET', auth_type='AK', style='ROA', @@ -3479,24 +5748,41 @@ def get_model_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetModelResponse(), + aiwork_space_20210204_models.DescribePricingModuleResponse(), self.call_api(params, req, runtime) ) - async def get_model_with_options_async( + async def describe_pricing_module_with_options_async( self, - model_id: str, + request: aiwork_space_20210204_models.DescribePricingModuleRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetModelResponse: + ) -> aiwork_space_20210204_models.DescribePricingModuleResponse: + """ + @summary 查询阿里云商品对应模块信息 + + @param request: 
DescribePricingModuleRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DescribePricingModuleResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.product_code): + query['ProductCode'] = request.product_code + if not UtilClient.is_unset(request.product_type): + query['ProductType'] = request.product_type + if not UtilClient.is_unset(request.subscription_type): + query['SubscriptionType'] = request.subscription_type req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetModel', + action='DescribePricingModule', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}', + pathname=f'/api/v1/proxy/describepricingmodule', method='GET', auth_type='AK', style='ROA', @@ -3504,41 +5790,59 @@ async def get_model_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetModelResponse(), + aiwork_space_20210204_models.DescribePricingModuleResponse(), await self.call_api_async(params, req, runtime) ) - def get_model( + def describe_pricing_module( self, - model_id: str, - ) -> aiwork_space_20210204_models.GetModelResponse: + request: aiwork_space_20210204_models.DescribePricingModuleRequest, + ) -> aiwork_space_20210204_models.DescribePricingModuleResponse: + """ + @summary 查询阿里云商品对应模块信息 + + @param request: DescribePricingModuleRequest + @return: DescribePricingModuleResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_model_with_options(model_id, headers, runtime) + return self.describe_pricing_module_with_options(request, headers, runtime) - async def get_model_async( + async def describe_pricing_module_async( self, - model_id: str, - ) -> aiwork_space_20210204_models.GetModelResponse: + request: 
aiwork_space_20210204_models.DescribePricingModuleRequest, + ) -> aiwork_space_20210204_models.DescribePricingModuleResponse: + """ + @summary 查询阿里云商品对应模块信息 + + @param request: DescribePricingModuleRequest + @return: DescribePricingModuleResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_model_with_options_async(model_id, headers, runtime) + return await self.describe_pricing_module_with_options_async(request, headers, runtime) - def get_model_version_with_options( + def get_code_source_with_options( self, - model_id: str, - version_name: str, + code_source_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetModelVersionResponse: + ) -> aiwork_space_20210204_models.GetCodeSourceResponse: + """ + @summary 获取一个代码源配置 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetCodeSourceResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='GetModelVersion', + action='GetCodeSource', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', + pathname=f'/api/v1/codesources/{OpenApiUtilClient.get_encode_param(code_source_id)}', method='GET', auth_type='AK', style='ROA', @@ -3546,25 +5850,31 @@ def get_model_version_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetModelVersionResponse(), + aiwork_space_20210204_models.GetCodeSourceResponse(), self.call_api(params, req, runtime) ) - async def get_model_version_with_options_async( + async def get_code_source_with_options_async( self, - model_id: str, - version_name: str, + code_source_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetModelVersionResponse: + ) -> 
aiwork_space_20210204_models.GetCodeSourceResponse: + """ + @summary 获取一个代码源配置 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetCodeSourceResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='GetModelVersion', + action='GetCodeSource', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', + pathname=f'/api/v1/codesources/{OpenApiUtilClient.get_encode_param(code_source_id)}', method='GET', auth_type='AK', style='ROA', @@ -3572,51 +5882,63 @@ async def get_model_version_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetModelVersionResponse(), + aiwork_space_20210204_models.GetCodeSourceResponse(), await self.call_api_async(params, req, runtime) ) - def get_model_version( + def get_code_source( self, - model_id: str, - version_name: str, - ) -> aiwork_space_20210204_models.GetModelVersionResponse: + code_source_id: str, + ) -> aiwork_space_20210204_models.GetCodeSourceResponse: + """ + @summary 获取一个代码源配置 + + @return: GetCodeSourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_model_version_with_options(model_id, version_name, headers, runtime) + return self.get_code_source_with_options(code_source_id, headers, runtime) - async def get_model_version_async( + async def get_code_source_async( self, - model_id: str, - version_name: str, - ) -> aiwork_space_20210204_models.GetModelVersionResponse: + code_source_id: str, + ) -> aiwork_space_20210204_models.GetCodeSourceResponse: + """ + @summary 获取一个代码源配置 + + @return: GetCodeSourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_model_version_with_options_async(model_id, version_name, headers, runtime) + return await 
self.get_code_source_with_options_async(code_source_id, headers, runtime) - def get_permission_with_options( + def get_code_sources_statistics_with_options( self, - workspace_id: str, - permission_code: str, - request: aiwork_space_20210204_models.GetPermissionRequest, + request: aiwork_space_20210204_models.GetCodeSourcesStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetPermissionResponse: + ) -> aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse: + """ + @summary 获取当前工作空间下的CodeSources的统计信息 + + @param request: GetCodeSourcesStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetCodeSourcesStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.accessibility): - query['Accessibility'] = request.accessibility - if not UtilClient.is_unset(request.creator): - query['Creator'] = request.creator + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetPermission', + action='GetCodeSourcesStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions/{OpenApiUtilClient.get_encode_param(permission_code)}', + pathname=f'/api/v1/statistics/codesources', method='GET', auth_type='AK', style='ROA', @@ -3624,33 +5946,37 @@ def get_permission_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetPermissionResponse(), + aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse(), self.call_api(params, req, runtime) ) - async def get_permission_with_options_async( + async def get_code_sources_statistics_with_options_async( self, - workspace_id: str, - permission_code: str, - request: 
aiwork_space_20210204_models.GetPermissionRequest, + request: aiwork_space_20210204_models.GetCodeSourcesStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetPermissionResponse: + ) -> aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse: + """ + @summary 获取当前工作空间下的CodeSources的统计信息 + + @param request: GetCodeSourcesStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetCodeSourcesStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.accessibility): - query['Accessibility'] = request.accessibility - if not UtilClient.is_unset(request.creator): - query['Creator'] = request.creator + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetPermission', + action='GetCodeSourcesStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions/{OpenApiUtilClient.get_encode_param(permission_code)}', + pathname=f'/api/v1/statistics/codesources', method='GET', auth_type='AK', style='ROA', @@ -3658,49 +5984,59 @@ async def get_permission_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetPermissionResponse(), + aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse(), await self.call_api_async(params, req, runtime) ) - def get_permission( + def get_code_sources_statistics( self, - workspace_id: str, - permission_code: str, - request: aiwork_space_20210204_models.GetPermissionRequest, - ) -> aiwork_space_20210204_models.GetPermissionResponse: + request: aiwork_space_20210204_models.GetCodeSourcesStatisticsRequest, + ) -> 
aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse: + """ + @summary 获取当前工作空间下的CodeSources的统计信息 + + @param request: GetCodeSourcesStatisticsRequest + @return: GetCodeSourcesStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_permission_with_options(workspace_id, permission_code, request, headers, runtime) + return self.get_code_sources_statistics_with_options(request, headers, runtime) - async def get_permission_async( + async def get_code_sources_statistics_async( self, - workspace_id: str, - permission_code: str, - request: aiwork_space_20210204_models.GetPermissionRequest, - ) -> aiwork_space_20210204_models.GetPermissionResponse: + request: aiwork_space_20210204_models.GetCodeSourcesStatisticsRequest, + ) -> aiwork_space_20210204_models.GetCodeSourcesStatisticsResponse: + """ + @summary 获取当前工作空间下的CodeSources的统计信息 + + @param request: GetCodeSourcesStatisticsRequest + @return: GetCodeSourcesStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_permission_with_options_async(workspace_id, permission_code, request, headers, runtime) + return await self.get_code_sources_statistics_with_options_async(request, headers, runtime) - def get_role_statistics_with_options( + def get_collection_with_options( self, - request: aiwork_space_20210204_models.GetRoleStatisticsRequest, + collection_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetRoleStatisticsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + ) -> aiwork_space_20210204_models.GetCollectionResponse: + """ + @summary 获取Collection + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetCollectionResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - 
query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='GetRoleStatistics', + action='GetCollection', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/statistics/roles', + pathname=f'/api/v1/collections/{OpenApiUtilClient.get_encode_param(collection_name)}', method='GET', auth_type='AK', style='ROA', @@ -3708,29 +6044,31 @@ def get_role_statistics_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetRoleStatisticsResponse(), + aiwork_space_20210204_models.GetCollectionResponse(), self.call_api(params, req, runtime) ) - async def get_role_statistics_with_options_async( + async def get_collection_with_options_async( self, - request: aiwork_space_20210204_models.GetRoleStatisticsRequest, + collection_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetRoleStatisticsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + ) -> aiwork_space_20210204_models.GetCollectionResponse: + """ + @summary 获取Collection + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetCollectionResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='GetRoleStatistics', + action='GetCollection', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/statistics/roles', + pathname=f'/api/v1/collections/{OpenApiUtilClient.get_encode_param(collection_name)}', method='GET', auth_type='AK', style='ROA', @@ -3738,46 +6076,57 @@ async def get_role_statistics_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetRoleStatisticsResponse(), + aiwork_space_20210204_models.GetCollectionResponse(), await self.call_api_async(params, req, 
runtime) ) - def get_role_statistics( + def get_collection( self, - request: aiwork_space_20210204_models.GetRoleStatisticsRequest, - ) -> aiwork_space_20210204_models.GetRoleStatisticsResponse: + collection_name: str, + ) -> aiwork_space_20210204_models.GetCollectionResponse: + """ + @summary 获取Collection + + @return: GetCollectionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_role_statistics_with_options(request, headers, runtime) + return self.get_collection_with_options(collection_name, headers, runtime) - async def get_role_statistics_async( + async def get_collection_async( self, - request: aiwork_space_20210204_models.GetRoleStatisticsRequest, - ) -> aiwork_space_20210204_models.GetRoleStatisticsResponse: + collection_name: str, + ) -> aiwork_space_20210204_models.GetCollectionResponse: + """ + @summary 获取Collection + + @return: GetCollectionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_role_statistics_with_options_async(request, headers, runtime) + return await self.get_collection_with_options_async(collection_name, headers, runtime) - def get_workspace_with_options( + def get_dataset_with_options( self, - workspace_id: str, - request: aiwork_space_20210204_models.GetWorkspaceRequest, + dataset_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetWorkspaceResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose + ) -> aiwork_space_20210204_models.GetDatasetResponse: + """ + @summary 获取数据集 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetDatasetResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='GetWorkspace', + action='GetDataset', version='2021-02-04', 
protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}', method='GET', auth_type='AK', style='ROA', @@ -3785,30 +6134,31 @@ def get_workspace_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetWorkspaceResponse(), + aiwork_space_20210204_models.GetDatasetResponse(), self.call_api(params, req, runtime) ) - async def get_workspace_with_options_async( + async def get_dataset_with_options_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.GetWorkspaceRequest, + dataset_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.GetWorkspaceResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose + ) -> aiwork_space_20210204_models.GetDatasetResponse: + """ + @summary 获取数据集 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetDatasetResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='GetWorkspace', + action='GetDataset', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}', method='GET', auth_type='AK', style='ROA', @@ -3816,46 +6166,52 @@ async def get_workspace_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.GetWorkspaceResponse(), + aiwork_space_20210204_models.GetDatasetResponse(), await self.call_api_async(params, req, runtime) ) - def get_workspace( + def get_dataset( self, - workspace_id: str, - request: aiwork_space_20210204_models.GetWorkspaceRequest, - ) -> 
aiwork_space_20210204_models.GetWorkspaceResponse: + dataset_id: str, + ) -> aiwork_space_20210204_models.GetDatasetResponse: + """ + @summary 获取数据集 + + @return: GetDatasetResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_workspace_with_options(workspace_id, request, headers, runtime) + return self.get_dataset_with_options(dataset_id, headers, runtime) - async def get_workspace_async( + async def get_dataset_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.GetWorkspaceRequest, - ) -> aiwork_space_20210204_models.GetWorkspaceResponse: + dataset_id: str, + ) -> aiwork_space_20210204_models.GetDatasetResponse: + """ + @summary 获取数据集 + + @return: GetDatasetResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_workspace_with_options_async(workspace_id, request, headers, runtime) + return await self.get_dataset_with_options_async(dataset_id, headers, runtime) - def list_code_sources_with_options( + def get_datasets_statistics_with_options( self, - request: aiwork_space_20210204_models.ListCodeSourcesRequest, + request: aiwork_space_20210204_models.GetDatasetsStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListCodeSourcesResponse: + ) -> aiwork_space_20210204_models.GetDatasetsStatisticsResponse: + """ + @summary 获取数据集总数 + + @param request: GetDatasetsStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetDatasetsStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.display_name): - query['DisplayName'] = request.display_name - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not 
UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by if not UtilClient.is_unset(request.workspace_id): query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( @@ -3863,10 +6219,10 @@ def list_code_sources_with_options( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListCodeSources', + action='GetDatasetsStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/codesources', + pathname=f'/api/v1/statistics/datasets', method='GET', auth_type='AK', style='ROA', @@ -3874,28 +6230,26 @@ def list_code_sources_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListCodeSourcesResponse(), + aiwork_space_20210204_models.GetDatasetsStatisticsResponse(), self.call_api(params, req, runtime) ) - async def list_code_sources_with_options_async( + async def get_datasets_statistics_with_options_async( self, - request: aiwork_space_20210204_models.ListCodeSourcesRequest, + request: aiwork_space_20210204_models.GetDatasetsStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListCodeSourcesResponse: + ) -> aiwork_space_20210204_models.GetDatasetsStatisticsResponse: + """ + @summary 获取数据集总数 + + @param request: GetDatasetsStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetDatasetsStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.display_name): - query['DisplayName'] = request.display_name - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by if not 
UtilClient.is_unset(request.workspace_id): query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( @@ -3903,10 +6257,10 @@ async def list_code_sources_with_options_async( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListCodeSources', + action='GetDatasetsStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/codesources', + pathname=f'/api/v1/statistics/datasets', method='GET', auth_type='AK', style='ROA', @@ -3914,46 +6268,60 @@ async def list_code_sources_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListCodeSourcesResponse(), + aiwork_space_20210204_models.GetDatasetsStatisticsResponse(), await self.call_api_async(params, req, runtime) ) - def list_code_sources( + def get_datasets_statistics( self, - request: aiwork_space_20210204_models.ListCodeSourcesRequest, - ) -> aiwork_space_20210204_models.ListCodeSourcesResponse: + request: aiwork_space_20210204_models.GetDatasetsStatisticsRequest, + ) -> aiwork_space_20210204_models.GetDatasetsStatisticsResponse: + """ + @summary 获取数据集总数 + + @param request: GetDatasetsStatisticsRequest + @return: GetDatasetsStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_code_sources_with_options(request, headers, runtime) + return self.get_datasets_statistics_with_options(request, headers, runtime) - async def list_code_sources_async( + async def get_datasets_statistics_async( self, - request: aiwork_space_20210204_models.ListCodeSourcesRequest, - ) -> aiwork_space_20210204_models.ListCodeSourcesResponse: + request: aiwork_space_20210204_models.GetDatasetsStatisticsRequest, + ) -> aiwork_space_20210204_models.GetDatasetsStatisticsResponse: + """ + @summary 获取数据集总数 + + @param request: GetDatasetsStatisticsRequest + @return: GetDatasetsStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await 
self.list_code_sources_with_options_async(request, headers, runtime) + return await self.get_datasets_statistics_with_options_async(request, headers, runtime) - def list_configs_with_options( + def get_dataset_version_with_options( self, - workspace_id: str, - request: aiwork_space_20210204_models.ListConfigsRequest, + dataset_id: str, + version_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListConfigsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.config_keys): - query['ConfigKeys'] = request.config_keys + ) -> aiwork_space_20210204_models.GetDatasetVersionResponse: + """ + @summary 获取指定版本的数据集信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetDatasetVersionResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListConfigs', + action='GetDatasetVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/configs', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', method='GET', auth_type='AK', style='ROA', @@ -3961,30 +6329,32 @@ def list_configs_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListConfigsResponse(), + aiwork_space_20210204_models.GetDatasetVersionResponse(), self.call_api(params, req, runtime) ) - async def list_configs_with_options_async( + async def get_dataset_version_with_options_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.ListConfigsRequest, + dataset_id: str, + version_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListConfigsResponse: - UtilClient.validate_model(request) 
- query = {} - if not UtilClient.is_unset(request.config_keys): - query['ConfigKeys'] = request.config_keys + ) -> aiwork_space_20210204_models.GetDatasetVersionResponse: + """ + @summary 获取指定版本的数据集信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetDatasetVersionResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListConfigs', + action='GetDatasetVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/configs', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', method='GET', auth_type='AK', style='ROA', @@ -3992,71 +6362,65 @@ async def list_configs_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListConfigsResponse(), + aiwork_space_20210204_models.GetDatasetVersionResponse(), await self.call_api_async(params, req, runtime) ) - def list_configs( + def get_dataset_version( self, - workspace_id: str, - request: aiwork_space_20210204_models.ListConfigsRequest, - ) -> aiwork_space_20210204_models.ListConfigsResponse: + dataset_id: str, + version_name: str, + ) -> aiwork_space_20210204_models.GetDatasetVersionResponse: + """ + @summary 获取指定版本的数据集信息 + + @return: GetDatasetVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_configs_with_options(workspace_id, request, headers, runtime) + return self.get_dataset_version_with_options(dataset_id, version_name, headers, runtime) - async def list_configs_async( + async def get_dataset_version_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.ListConfigsRequest, - ) -> aiwork_space_20210204_models.ListConfigsResponse: + dataset_id: str, + version_name: str, + ) -> 
aiwork_space_20210204_models.GetDatasetVersionResponse: + """ + @summary 获取指定版本的数据集信息 + + @return: GetDatasetVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_configs_with_options_async(workspace_id, request, headers, runtime) + return await self.get_dataset_version_with_options_async(dataset_id, version_name, headers, runtime) - def list_datasets_with_options( + def get_default_workspace_with_options( self, - request: aiwork_space_20210204_models.ListDatasetsRequest, + request: aiwork_space_20210204_models.GetDefaultWorkspaceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListDatasetsResponse: + ) -> aiwork_space_20210204_models.GetDefaultWorkspaceResponse: + """ + @summary 获取默认工作空间 + + @param request: GetDefaultWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetDefaultWorkspaceResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.data_source_types): - query['DataSourceTypes'] = request.data_source_types - if not UtilClient.is_unset(request.data_types): - query['DataTypes'] = request.data_types - if not UtilClient.is_unset(request.label): - query['Label'] = request.label - if not UtilClient.is_unset(request.label_keys): - query['LabelKeys'] = request.label_keys - if not UtilClient.is_unset(request.label_values): - query['LabelValues'] = request.label_values - if not UtilClient.is_unset(request.name): - query['Name'] = request.name - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.properties): - query['Properties'] = request.properties - if not UtilClient.is_unset(request.source_id): - query['SourceId'] = 
request.source_id - if not UtilClient.is_unset(request.source_types): - query['SourceTypes'] = request.source_types - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListDatasets', + action='GetDefaultWorkspace', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets', + pathname=f'/api/v1/defaultWorkspaces', method='GET', auth_type='AK', style='ROA', @@ -4064,53 +6428,37 @@ def list_datasets_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListDatasetsResponse(), + aiwork_space_20210204_models.GetDefaultWorkspaceResponse(), self.call_api(params, req, runtime) ) - async def list_datasets_with_options_async( + async def get_default_workspace_with_options_async( self, - request: aiwork_space_20210204_models.ListDatasetsRequest, + request: aiwork_space_20210204_models.GetDefaultWorkspaceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListDatasetsResponse: + ) -> aiwork_space_20210204_models.GetDefaultWorkspaceResponse: + """ + @summary 获取默认工作空间 + + @param request: GetDefaultWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetDefaultWorkspaceResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.data_source_types): - query['DataSourceTypes'] = request.data_source_types - if not UtilClient.is_unset(request.data_types): - query['DataTypes'] = request.data_types - if not UtilClient.is_unset(request.label): - query['Label'] = request.label - if not UtilClient.is_unset(request.label_keys): - query['LabelKeys'] = request.label_keys - if not UtilClient.is_unset(request.label_values): - 
query['LabelValues'] = request.label_values - if not UtilClient.is_unset(request.name): - query['Name'] = request.name - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.properties): - query['Properties'] = request.properties - if not UtilClient.is_unset(request.source_id): - query['SourceId'] = request.source_id - if not UtilClient.is_unset(request.source_types): - query['SourceTypes'] = request.source_types - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListDatasets', + action='GetDefaultWorkspace', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets', + pathname=f'/api/v1/defaultWorkspaces', method='GET', auth_type='AK', style='ROA', @@ -4118,45 +6466,59 @@ async def list_datasets_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListDatasetsResponse(), + aiwork_space_20210204_models.GetDefaultWorkspaceResponse(), await self.call_api_async(params, req, runtime) ) - def list_datasets( + def get_default_workspace( self, - request: aiwork_space_20210204_models.ListDatasetsRequest, - ) -> aiwork_space_20210204_models.ListDatasetsResponse: + request: aiwork_space_20210204_models.GetDefaultWorkspaceRequest, + ) -> aiwork_space_20210204_models.GetDefaultWorkspaceResponse: + """ + @summary 获取默认工作空间 + + @param request: GetDefaultWorkspaceRequest + @return: GetDefaultWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_datasets_with_options(request, headers, 
runtime) + return self.get_default_workspace_with_options(request, headers, runtime) - async def list_datasets_async( + async def get_default_workspace_async( self, - request: aiwork_space_20210204_models.ListDatasetsRequest, - ) -> aiwork_space_20210204_models.ListDatasetsResponse: + request: aiwork_space_20210204_models.GetDefaultWorkspaceRequest, + ) -> aiwork_space_20210204_models.GetDefaultWorkspaceResponse: + """ + @summary 获取默认工作空间 + + @param request: GetDefaultWorkspaceRequest + @return: GetDefaultWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_datasets_with_options_async(request, headers, runtime) + return await self.get_default_workspace_with_options_async(request, headers, runtime) - def list_features_with_options( + def get_experiment_with_options( self, - request: aiwork_space_20210204_models.ListFeaturesRequest, + experiment_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListFeaturesResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.names): - query['Names'] = request.names + ) -> aiwork_space_20210204_models.GetExperimentResponse: + """ + @summary 获取实验 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetExperimentResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListFeatures', + action='GetExperiment', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/features', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}', method='GET', auth_type='AK', style='ROA', @@ -4164,29 +6526,31 @@ def list_features_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListFeaturesResponse(), + aiwork_space_20210204_models.GetExperimentResponse(), 
self.call_api(params, req, runtime) ) - async def list_features_with_options_async( + async def get_experiment_with_options_async( self, - request: aiwork_space_20210204_models.ListFeaturesRequest, + experiment_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListFeaturesResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.names): - query['Names'] = request.names + ) -> aiwork_space_20210204_models.GetExperimentResponse: + """ + @summary 获取实验 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetExperimentResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListFeatures', + action='GetExperiment', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/features', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}', method='GET', auth_type='AK', style='ROA', @@ -4194,39 +6558,64 @@ async def list_features_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListFeaturesResponse(), + aiwork_space_20210204_models.GetExperimentResponse(), await self.call_api_async(params, req, runtime) ) - def list_features( + def get_experiment( self, - request: aiwork_space_20210204_models.ListFeaturesRequest, - ) -> aiwork_space_20210204_models.ListFeaturesResponse: + experiment_id: str, + ) -> aiwork_space_20210204_models.GetExperimentResponse: + """ + @summary 获取实验 + + @return: GetExperimentResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_features_with_options(request, headers, runtime) + return self.get_experiment_with_options(experiment_id, headers, runtime) - async def list_features_async( + async def get_experiment_async( self, - request: aiwork_space_20210204_models.ListFeaturesRequest, - ) 
-> aiwork_space_20210204_models.ListFeaturesResponse: + experiment_id: str, + ) -> aiwork_space_20210204_models.GetExperimentResponse: + """ + @summary 获取实验 + + @return: GetExperimentResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_features_with_options_async(request, headers, runtime) + return await self.get_experiment_with_options_async(experiment_id, headers, runtime) - def list_global_permissions_with_options( + def get_image_with_options( self, + image_id: str, + request: aiwork_space_20210204_models.GetImageRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListGlobalPermissionsResponse: + ) -> aiwork_space_20210204_models.GetImageResponse: + """ + @summary 获取镜像 + + @param request: GetImageRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetImageResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListGlobalPermissions', + action='GetImage', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/permissions', + pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}', method='GET', auth_type='AK', style='ROA', @@ -4234,23 +6623,38 @@ def list_global_permissions_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListGlobalPermissionsResponse(), + aiwork_space_20210204_models.GetImageResponse(), self.call_api(params, req, runtime) ) - async def list_global_permissions_with_options_async( + async def get_image_with_options_async( self, + image_id: str, + request: aiwork_space_20210204_models.GetImageRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> 
aiwork_space_20210204_models.ListGlobalPermissionsResponse: + ) -> aiwork_space_20210204_models.GetImageResponse: + """ + @summary 获取镜像 + + @param request: GetImageRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetImageResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListGlobalPermissions', + action='GetImage', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/permissions', + pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}', method='GET', auth_type='AK', style='ROA', @@ -4258,36 +6662,56 @@ async def list_global_permissions_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListGlobalPermissionsResponse(), + aiwork_space_20210204_models.GetImageResponse(), await self.call_api_async(params, req, runtime) ) - def list_global_permissions(self) -> aiwork_space_20210204_models.ListGlobalPermissionsResponse: + def get_image( + self, + image_id: str, + request: aiwork_space_20210204_models.GetImageRequest, + ) -> aiwork_space_20210204_models.GetImageResponse: + """ + @summary 获取镜像 + + @param request: GetImageRequest + @return: GetImageResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_global_permissions_with_options(headers, runtime) + return self.get_image_with_options(image_id, request, headers, runtime) + + async def get_image_async( + self, + image_id: str, + request: aiwork_space_20210204_models.GetImageRequest, + ) -> aiwork_space_20210204_models.GetImageResponse: + """ + @summary 获取镜像 - async def list_global_permissions_async(self) -> aiwork_space_20210204_models.ListGlobalPermissionsResponse: + @param request: GetImageRequest + @return: 
GetImageResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_global_permissions_with_options_async(headers, runtime) + return await self.get_image_with_options_async(image_id, request, headers, runtime) - def list_image_labels_with_options( + def get_images_statistics_with_options( self, - request: aiwork_space_20210204_models.ListImageLabelsRequest, + request: aiwork_space_20210204_models.GetImagesStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListImageLabelsResponse: + ) -> aiwork_space_20210204_models.GetImagesStatisticsResponse: + """ + @summary 获取镜像统计 + + @param request: GetImagesStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetImagesStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.image_id): - query['ImageId'] = request.image_id - if not UtilClient.is_unset(request.label_filter): - query['LabelFilter'] = request.label_filter - if not UtilClient.is_unset(request.label_keys): - query['LabelKeys'] = request.label_keys - if not UtilClient.is_unset(request.region): - query['Region'] = request.region if not UtilClient.is_unset(request.workspace_id): query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( @@ -4295,10 +6719,10 @@ def list_image_labels_with_options( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListImageLabels', + action='GetImagesStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/image/labels', + pathname=f'/api/v1/statistics/images', method='GET', auth_type='AK', style='ROA', @@ -4306,26 +6730,26 @@ def list_image_labels_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListImageLabelsResponse(), + aiwork_space_20210204_models.GetImagesStatisticsResponse(), self.call_api(params, req, runtime) 
) - async def list_image_labels_with_options_async( + async def get_images_statistics_with_options_async( self, - request: aiwork_space_20210204_models.ListImageLabelsRequest, + request: aiwork_space_20210204_models.GetImagesStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListImageLabelsResponse: + ) -> aiwork_space_20210204_models.GetImagesStatisticsResponse: + """ + @summary 获取镜像统计 + + @param request: GetImagesStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetImagesStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.image_id): - query['ImageId'] = request.image_id - if not UtilClient.is_unset(request.label_filter): - query['LabelFilter'] = request.label_filter - if not UtilClient.is_unset(request.label_keys): - query['LabelKeys'] = request.label_keys - if not UtilClient.is_unset(request.region): - query['Region'] = request.region if not UtilClient.is_unset(request.workspace_id): query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( @@ -4333,10 +6757,10 @@ async def list_image_labels_with_options_async( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListImageLabels', + action='GetImagesStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/image/labels', + pathname=f'/api/v1/statistics/images', method='GET', auth_type='AK', style='ROA', @@ -4344,65 +6768,59 @@ async def list_image_labels_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListImageLabelsResponse(), + aiwork_space_20210204_models.GetImagesStatisticsResponse(), await self.call_api_async(params, req, runtime) ) - def list_image_labels( + def get_images_statistics( self, - request: aiwork_space_20210204_models.ListImageLabelsRequest, - ) -> 
aiwork_space_20210204_models.ListImageLabelsResponse: + request: aiwork_space_20210204_models.GetImagesStatisticsRequest, + ) -> aiwork_space_20210204_models.GetImagesStatisticsResponse: + """ + @summary 获取镜像统计 + + @param request: GetImagesStatisticsRequest + @return: GetImagesStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_image_labels_with_options(request, headers, runtime) + return self.get_images_statistics_with_options(request, headers, runtime) - async def list_image_labels_async( + async def get_images_statistics_async( self, - request: aiwork_space_20210204_models.ListImageLabelsRequest, - ) -> aiwork_space_20210204_models.ListImageLabelsResponse: + request: aiwork_space_20210204_models.GetImagesStatisticsRequest, + ) -> aiwork_space_20210204_models.GetImagesStatisticsResponse: + """ + @summary 获取镜像统计 + + @param request: GetImagesStatisticsRequest + @return: GetImagesStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_image_labels_with_options_async(request, headers, runtime) + return await self.get_images_statistics_with_options_async(request, headers, runtime) - def list_images_with_options( + def get_instance_job_with_options( self, - request: aiwork_space_20210204_models.ListImagesRequest, + instance_job_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListImagesResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.labels): - query['Labels'] = request.labels - if not UtilClient.is_unset(request.name): - query['Name'] = request.name - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.parent_user_id): - 
query['ParentUserId'] = request.parent_user_id - if not UtilClient.is_unset(request.query): - query['Query'] = request.query - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.user_id): - query['UserId'] = request.user_id - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + ) -> aiwork_space_20210204_models.GetInstanceJobResponse: + """ + @summary 获取任务 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetInstanceJobResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListImages', + action='GetInstanceJob', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/images', + pathname=f'/api/v1/instancejobs/{OpenApiUtilClient.get_encode_param(instance_job_id)}', method='GET', auth_type='AK', style='ROA', @@ -4410,49 +6828,31 @@ def list_images_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListImagesResponse(), + aiwork_space_20210204_models.GetInstanceJobResponse(), self.call_api(params, req, runtime) ) - async def list_images_with_options_async( + async def get_instance_job_with_options_async( self, - request: aiwork_space_20210204_models.ListImagesRequest, + instance_job_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListImagesResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.labels): - query['Labels'] = request.labels - if not UtilClient.is_unset(request.name): - query['Name'] = request.name - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = 
request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.parent_user_id): - query['ParentUserId'] = request.parent_user_id - if not UtilClient.is_unset(request.query): - query['Query'] = request.query - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.user_id): - query['UserId'] = request.user_id - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + ) -> aiwork_space_20210204_models.GetInstanceJobResponse: + """ + @summary 获取任务 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetInstanceJobResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListImages', + action='GetInstanceJob', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/images', + pathname=f'/api/v1/instancejobs/{OpenApiUtilClient.get_encode_param(instance_job_id)}', method='GET', auth_type='AK', style='ROA', @@ -4460,52 +6860,67 @@ async def list_images_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListImagesResponse(), + aiwork_space_20210204_models.GetInstanceJobResponse(), await self.call_api_async(params, req, runtime) ) - def list_images( + def get_instance_job( self, - request: aiwork_space_20210204_models.ListImagesRequest, - ) -> aiwork_space_20210204_models.ListImagesResponse: + instance_job_id: str, + ) -> aiwork_space_20210204_models.GetInstanceJobResponse: + """ + @summary 获取任务 + + @return: GetInstanceJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_images_with_options(request, headers, runtime) + return 
self.get_instance_job_with_options(instance_job_id, headers, runtime) - async def list_images_async( + async def get_instance_job_async( self, - request: aiwork_space_20210204_models.ListImagesRequest, - ) -> aiwork_space_20210204_models.ListImagesResponse: + instance_job_id: str, + ) -> aiwork_space_20210204_models.GetInstanceJobResponse: + """ + @summary 获取任务 + + @return: GetInstanceJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_images_with_options_async(request, headers, runtime) + return await self.get_instance_job_with_options_async(instance_job_id, headers, runtime) - def list_members_with_options( + def get_instance_statistics_with_options( self, - workspace_id: str, - request: aiwork_space_20210204_models.ListMembersRequest, + request: aiwork_space_20210204_models.GetInstanceStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListMembersResponse: + ) -> aiwork_space_20210204_models.GetInstanceStatisticsResponse: + """ + @summary 获得工作空间下实例统计数据 + + @param request: GetInstanceStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetInstanceStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.member_name): - query['MemberName'] = request.member_name - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.roles): - query['Roles'] = request.roles + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, 
query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListMembers', + action='GetInstanceStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members', + pathname=f'/api/v1/statistics/instances', method='GET', auth_type='AK', style='ROA', @@ -4513,36 +6928,41 @@ def list_members_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListMembersResponse(), + aiwork_space_20210204_models.GetInstanceStatisticsResponse(), self.call_api(params, req, runtime) ) - async def list_members_with_options_async( + async def get_instance_statistics_with_options_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.ListMembersRequest, + request: aiwork_space_20210204_models.GetInstanceStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListMembersResponse: + ) -> aiwork_space_20210204_models.GetInstanceStatisticsResponse: + """ + @summary 获得工作空间下实例统计数据 + + @param request: GetInstanceStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetInstanceStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.member_name): - query['MemberName'] = request.member_name - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.roles): - query['Roles'] = request.roles + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, 
query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListMembers', + action='GetInstanceStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members', + pathname=f'/api/v1/statistics/instances', method='GET', auth_type='AK', style='ROA', @@ -4550,47 +6970,68 @@ async def list_members_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListMembersResponse(), + aiwork_space_20210204_models.GetInstanceStatisticsResponse(), await self.call_api_async(params, req, runtime) ) - def list_members( + def get_instance_statistics( self, - workspace_id: str, - request: aiwork_space_20210204_models.ListMembersRequest, - ) -> aiwork_space_20210204_models.ListMembersResponse: + request: aiwork_space_20210204_models.GetInstanceStatisticsRequest, + ) -> aiwork_space_20210204_models.GetInstanceStatisticsResponse: + """ + @summary 获得工作空间下实例统计数据 + + @param request: GetInstanceStatisticsRequest + @return: GetInstanceStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_members_with_options(workspace_id, request, headers, runtime) + return self.get_instance_statistics_with_options(request, headers, runtime) - async def list_members_async( + async def get_instance_statistics_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.ListMembersRequest, - ) -> aiwork_space_20210204_models.ListMembersResponse: + request: aiwork_space_20210204_models.GetInstanceStatisticsRequest, + ) -> aiwork_space_20210204_models.GetInstanceStatisticsResponse: + """ + @summary 获得工作空间下实例统计数据 + + @param request: GetInstanceStatisticsRequest + @return: GetInstanceStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_members_with_options_async(workspace_id, request, headers, runtime) + return await self.get_instance_statistics_with_options_async(request, 
headers, runtime) - def list_model_domains_with_options( + def get_member_with_options( self, - request: aiwork_space_20210204_models.ListModelDomainsRequest, + workspace_id: str, + request: aiwork_space_20210204_models.GetMemberRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListModelDomainsResponse: + ) -> aiwork_space_20210204_models.GetMemberResponse: + """ + @summary 获取成员 + + @param request: GetMemberRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetMemberResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.model_domain_ids): - query['ModelDomainIds'] = request.model_domain_ids + if not UtilClient.is_unset(request.member_id): + query['MemberId'] = request.member_id + if not UtilClient.is_unset(request.user_id): + query['UserId'] = request.user_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListModelDomains', + action='GetMember', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/modeldomains', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/member', method='GET', auth_type='AK', style='ROA', @@ -4598,29 +7039,40 @@ def list_model_domains_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListModelDomainsResponse(), + aiwork_space_20210204_models.GetMemberResponse(), self.call_api(params, req, runtime) ) - async def list_model_domains_with_options_async( + async def get_member_with_options_async( self, - request: aiwork_space_20210204_models.ListModelDomainsRequest, + workspace_id: str, + request: aiwork_space_20210204_models.GetMemberRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListModelDomainsResponse: + ) -> aiwork_space_20210204_models.GetMemberResponse: + """ + 
@summary 获取成员 + + @param request: GetMemberRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetMemberResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.model_domain_ids): - query['ModelDomainIds'] = request.model_domain_ids + if not UtilClient.is_unset(request.member_id): + query['MemberId'] = request.member_id + if not UtilClient.is_unset(request.user_id): + query['UserId'] = request.user_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListModelDomains', + action='GetMember', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/modeldomains', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/member', method='GET', auth_type='AK', style='ROA', @@ -4628,70 +7080,61 @@ async def list_model_domains_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListModelDomainsResponse(), + aiwork_space_20210204_models.GetMemberResponse(), await self.call_api_async(params, req, runtime) ) - def list_model_domains( + def get_member( self, - request: aiwork_space_20210204_models.ListModelDomainsRequest, - ) -> aiwork_space_20210204_models.ListModelDomainsResponse: + workspace_id: str, + request: aiwork_space_20210204_models.GetMemberRequest, + ) -> aiwork_space_20210204_models.GetMemberResponse: + """ + @summary 获取成员 + + @param request: GetMemberRequest + @return: GetMemberResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_model_domains_with_options(request, headers, runtime) + return self.get_member_with_options(workspace_id, request, headers, runtime) - async def list_model_domains_async( + async def get_member_async( self, - request: aiwork_space_20210204_models.ListModelDomainsRequest, - ) -> aiwork_space_20210204_models.ListModelDomainsResponse: + workspace_id: str, 
+ request: aiwork_space_20210204_models.GetMemberRequest, + ) -> aiwork_space_20210204_models.GetMemberResponse: + """ + @summary 获取成员 + + @param request: GetMemberRequest + @return: GetMemberResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_model_domains_with_options_async(request, headers, runtime) + return await self.get_member_with_options_async(workspace_id, request, headers, runtime) - def list_model_versions_with_options( + def get_model_with_options( self, model_id: str, - request: aiwork_space_20210204_models.ListModelVersionsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListModelVersionsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.approval_status): - query['ApprovalStatus'] = request.approval_status - if not UtilClient.is_unset(request.format_type): - query['FormatType'] = request.format_type - if not UtilClient.is_unset(request.framework_type): - query['FrameworkType'] = request.framework_type - if not UtilClient.is_unset(request.label): - query['Label'] = request.label - if not UtilClient.is_unset(request.label_string): - query['LabelString'] = request.label_string - if not UtilClient.is_unset(request.labels): - query['Labels'] = request.labels - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.source_id): - query['SourceId'] = request.source_id - if not UtilClient.is_unset(request.source_type): - query['SourceType'] = request.source_type - if not UtilClient.is_unset(request.version_name): - query['VersionName'] = request.version_name + ) -> 
aiwork_space_20210204_models.GetModelResponse: + """ + @summary 获取模型 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetModelResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListModelVersions', + action='GetModel', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}', method='GET', auth_type='AK', style='ROA', @@ -4699,54 +7142,31 @@ def list_model_versions_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListModelVersionsResponse(), + aiwork_space_20210204_models.GetModelResponse(), self.call_api(params, req, runtime) ) - async def list_model_versions_with_options_async( + async def get_model_with_options_async( self, model_id: str, - request: aiwork_space_20210204_models.ListModelVersionsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListModelVersionsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.approval_status): - query['ApprovalStatus'] = request.approval_status - if not UtilClient.is_unset(request.format_type): - query['FormatType'] = request.format_type - if not UtilClient.is_unset(request.framework_type): - query['FrameworkType'] = request.framework_type - if not UtilClient.is_unset(request.label): - query['Label'] = request.label - if not UtilClient.is_unset(request.label_string): - query['LabelString'] = request.label_string - if not UtilClient.is_unset(request.labels): - query['Labels'] = request.labels - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not 
UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.source_id): - query['SourceId'] = request.source_id - if not UtilClient.is_unset(request.source_type): - query['SourceType'] = request.source_type - if not UtilClient.is_unset(request.version_name): - query['VersionName'] = request.version_name + ) -> aiwork_space_20210204_models.GetModelResponse: + """ + @summary 获取模型 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetModelResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListModelVersions', + action='GetModel', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}', method='GET', auth_type='AK', style='ROA', @@ -4754,73 +7174,58 @@ async def list_model_versions_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListModelVersionsResponse(), + aiwork_space_20210204_models.GetModelResponse(), await self.call_api_async(params, req, runtime) ) - def list_model_versions( + def get_model( self, model_id: str, - request: aiwork_space_20210204_models.ListModelVersionsRequest, - ) -> aiwork_space_20210204_models.ListModelVersionsResponse: + ) -> aiwork_space_20210204_models.GetModelResponse: + """ + @summary 获取模型 + + @return: GetModelResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_model_versions_with_options(model_id, request, headers, runtime) + return self.get_model_with_options(model_id, headers, runtime) - async def list_model_versions_async( + async def get_model_async( self, model_id: str, - request: 
aiwork_space_20210204_models.ListModelVersionsRequest, - ) -> aiwork_space_20210204_models.ListModelVersionsResponse: + ) -> aiwork_space_20210204_models.GetModelResponse: + """ + @summary 获取模型 + + @return: GetModelResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_model_versions_with_options_async(model_id, request, headers, runtime) + return await self.get_model_with_options_async(model_id, headers, runtime) - def list_models_with_options( + def get_model_version_with_options( self, - request: aiwork_space_20210204_models.ListModelsRequest, + model_id: str, + version_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListModelsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.domain): - query['Domain'] = request.domain - if not UtilClient.is_unset(request.label): - query['Label'] = request.label - if not UtilClient.is_unset(request.label_string): - query['LabelString'] = request.label_string - if not UtilClient.is_unset(request.labels): - query['Labels'] = request.labels - if not UtilClient.is_unset(request.model_name): - query['ModelName'] = request.model_name - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.origin): - query['Origin'] = request.origin - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.provider): - query['Provider'] = request.provider - if not UtilClient.is_unset(request.query): - query['Query'] = request.query - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.task): - query['Task'] = request.task - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + ) -> 
aiwork_space_20210204_models.GetModelVersionResponse: + """ + @summary 获取模型版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetModelVersionResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListModels', + action='GetModelVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', method='GET', auth_type='AK', style='ROA', @@ -4828,55 +7233,32 @@ def list_models_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListModelsResponse(), + aiwork_space_20210204_models.GetModelVersionResponse(), self.call_api(params, req, runtime) ) - async def list_models_with_options_async( + async def get_model_version_with_options_async( self, - request: aiwork_space_20210204_models.ListModelsRequest, + model_id: str, + version_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListModelsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.domain): - query['Domain'] = request.domain - if not UtilClient.is_unset(request.label): - query['Label'] = request.label - if not UtilClient.is_unset(request.label_string): - query['LabelString'] = request.label_string - if not UtilClient.is_unset(request.labels): - query['Labels'] = request.labels - if not UtilClient.is_unset(request.model_name): - query['ModelName'] = request.model_name - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.origin): - query['Origin'] = request.origin - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not 
UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.provider): - query['Provider'] = request.provider - if not UtilClient.is_unset(request.query): - query['Query'] = request.query - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.task): - query['Task'] = request.task - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + ) -> aiwork_space_20210204_models.GetModelVersionResponse: + """ + @summary 获取模型版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetModelVersionResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListModels', + action='GetModelVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/models', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', method='GET', auth_type='AK', style='ROA', @@ -4884,140 +7266,189 @@ async def list_models_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListModelsResponse(), + aiwork_space_20210204_models.GetModelVersionResponse(), await self.call_api_async(params, req, runtime) ) - def list_models( + def get_model_version( self, - request: aiwork_space_20210204_models.ListModelsRequest, - ) -> aiwork_space_20210204_models.ListModelsResponse: + model_id: str, + version_name: str, + ) -> aiwork_space_20210204_models.GetModelVersionResponse: + """ + @summary 获取模型版本 + + @return: GetModelVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_models_with_options(request, headers, runtime) + return self.get_model_version_with_options(model_id, version_name, headers, runtime) - async def 
list_models_async( + async def get_model_version_async( self, - request: aiwork_space_20210204_models.ListModelsRequest, - ) -> aiwork_space_20210204_models.ListModelsResponse: + model_id: str, + version_name: str, + ) -> aiwork_space_20210204_models.GetModelVersionResponse: + """ + @summary 获取模型版本 + + @return: GetModelVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_models_with_options_async(request, headers, runtime) + return await self.get_model_version_with_options_async(model_id, version_name, headers, runtime) - def list_module_configs_with_options( + def get_pay_as_you_go_price_with_options( self, - request: aiwork_space_20210204_models.ListModuleConfigsRequest, + request: aiwork_space_20210204_models.GetPayAsYouGoPriceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListModuleConfigsResponse: + ) -> aiwork_space_20210204_models.GetPayAsYouGoPriceResponse: + """ + @summary 查询阿里云商品后付费价格 + + @param request: GetPayAsYouGoPriceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetPayAsYouGoPriceResponse + """ UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.module_codes): - query['ModuleCodes'] = request.module_codes - if not UtilClient.is_unset(request.region): - query['Region'] = request.region + body = {} + if not UtilClient.is_unset(request.module_list): + body['ModuleList'] = request.module_list + if not UtilClient.is_unset(request.product_code): + body['ProductCode'] = request.product_code + if not UtilClient.is_unset(request.product_type): + body['ProductType'] = request.product_type + if not UtilClient.is_unset(request.subscription_type): + body['SubscriptionType'] = request.subscription_type req = open_api_models.OpenApiRequest( headers=headers, - query=OpenApiUtilClient.query(query) + body=OpenApiUtilClient.parse_to_map(body) ) params = 
open_api_models.Params( - action='ListModuleConfigs', + action='GetPayAsYouGoPrice', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/moduleconfigs', - method='GET', + pathname=f'/api/v1/proxy/getpayasyougoprice', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListModuleConfigsResponse(), + aiwork_space_20210204_models.GetPayAsYouGoPriceResponse(), self.call_api(params, req, runtime) ) - async def list_module_configs_with_options_async( + async def get_pay_as_you_go_price_with_options_async( self, - request: aiwork_space_20210204_models.ListModuleConfigsRequest, + request: aiwork_space_20210204_models.GetPayAsYouGoPriceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListModuleConfigsResponse: + ) -> aiwork_space_20210204_models.GetPayAsYouGoPriceResponse: + """ + @summary 查询阿里云商品后付费价格 + + @param request: GetPayAsYouGoPriceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetPayAsYouGoPriceResponse + """ UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.module_codes): - query['ModuleCodes'] = request.module_codes - if not UtilClient.is_unset(request.region): - query['Region'] = request.region + body = {} + if not UtilClient.is_unset(request.module_list): + body['ModuleList'] = request.module_list + if not UtilClient.is_unset(request.product_code): + body['ProductCode'] = request.product_code + if not UtilClient.is_unset(request.product_type): + body['ProductType'] = request.product_type + if not UtilClient.is_unset(request.subscription_type): + body['SubscriptionType'] = request.subscription_type req = open_api_models.OpenApiRequest( headers=headers, - query=OpenApiUtilClient.query(query) + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='ListModuleConfigs', + 
action='GetPayAsYouGoPrice', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/moduleconfigs', - method='GET', + pathname=f'/api/v1/proxy/getpayasyougoprice', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListModuleConfigsResponse(), + aiwork_space_20210204_models.GetPayAsYouGoPriceResponse(), await self.call_api_async(params, req, runtime) ) - def list_module_configs( + def get_pay_as_you_go_price( self, - request: aiwork_space_20210204_models.ListModuleConfigsRequest, - ) -> aiwork_space_20210204_models.ListModuleConfigsResponse: + request: aiwork_space_20210204_models.GetPayAsYouGoPriceRequest, + ) -> aiwork_space_20210204_models.GetPayAsYouGoPriceResponse: + """ + @summary 查询阿里云商品后付费价格 + + @param request: GetPayAsYouGoPriceRequest + @return: GetPayAsYouGoPriceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_module_configs_with_options(request, headers, runtime) + return self.get_pay_as_you_go_price_with_options(request, headers, runtime) - async def list_module_configs_async( + async def get_pay_as_you_go_price_async( self, - request: aiwork_space_20210204_models.ListModuleConfigsRequest, - ) -> aiwork_space_20210204_models.ListModuleConfigsResponse: + request: aiwork_space_20210204_models.GetPayAsYouGoPriceRequest, + ) -> aiwork_space_20210204_models.GetPayAsYouGoPriceResponse: + """ + @summary 查询阿里云商品后付费价格 + + @param request: GetPayAsYouGoPriceRequest + @return: GetPayAsYouGoPriceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_module_configs_with_options_async(request, headers, runtime) + return await self.get_pay_as_you_go_price_with_options_async(request, headers, runtime) - def list_operation_logs_with_options( + def get_permission_with_options( self, workspace_id: str, - request: aiwork_space_20210204_models.ListOperationLogsRequest, + permission_code: str, + request: 
aiwork_space_20210204_models.GetPermissionRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListOperationLogsResponse: + ) -> aiwork_space_20210204_models.GetPermissionResponse: + """ + @summary 获取权限,若无权限则返回错误 + + @param request: GetPermissionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetPermissionResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.entity_status): - query['EntityStatus'] = request.entity_status - if not UtilClient.is_unset(request.entity_types): - query['EntityTypes'] = request.entity_types - if not UtilClient.is_unset(request.operation_status): - query['OperationStatus'] = request.operation_status - if not UtilClient.is_unset(request.operations): - query['Operations'] = request.operations - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.accessibility): + query['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.creator): + query['Creator'] = request.creator + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if not UtilClient.is_unset(request.resource): + query['Resource'] = request.resource req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListOperationLogs', + action='GetPermission', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/logs', + 
pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions/{OpenApiUtilClient.get_encode_param(permission_code)}', method='GET', auth_type='AK', style='ROA', @@ -5025,44 +7456,45 @@ def list_operation_logs_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListOperationLogsResponse(), + aiwork_space_20210204_models.GetPermissionResponse(), self.call_api(params, req, runtime) ) - async def list_operation_logs_with_options_async( + async def get_permission_with_options_async( self, workspace_id: str, - request: aiwork_space_20210204_models.ListOperationLogsRequest, + permission_code: str, + request: aiwork_space_20210204_models.GetPermissionRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListOperationLogsResponse: + ) -> aiwork_space_20210204_models.GetPermissionResponse: + """ + @summary 获取权限,若无权限则返回错误 + + @param request: GetPermissionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetPermissionResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.entity_status): - query['EntityStatus'] = request.entity_status - if not UtilClient.is_unset(request.entity_types): - query['EntityTypes'] = request.entity_types - if not UtilClient.is_unset(request.operation_status): - query['OperationStatus'] = request.operation_status - if not UtilClient.is_unset(request.operations): - query['Operations'] = request.operations - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.accessibility): + query['Accessibility'] = 
request.accessibility + if not UtilClient.is_unset(request.creator): + query['Creator'] = request.creator + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if not UtilClient.is_unset(request.resource): + query['Resource'] = request.resource req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListOperationLogs', + action='GetPermission', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/logs', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions/{OpenApiUtilClient.get_encode_param(permission_code)}', method='GET', auth_type='AK', style='ROA', @@ -5070,42 +7502,71 @@ async def list_operation_logs_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListOperationLogsResponse(), + aiwork_space_20210204_models.GetPermissionResponse(), await self.call_api_async(params, req, runtime) ) - def list_operation_logs( + def get_permission( self, workspace_id: str, - request: aiwork_space_20210204_models.ListOperationLogsRequest, - ) -> aiwork_space_20210204_models.ListOperationLogsResponse: + permission_code: str, + request: aiwork_space_20210204_models.GetPermissionRequest, + ) -> aiwork_space_20210204_models.GetPermissionResponse: + """ + @summary 获取权限,若无权限则返回错误 + + @param request: GetPermissionRequest + @return: GetPermissionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_operation_logs_with_options(workspace_id, request, headers, runtime) + return self.get_permission_with_options(workspace_id, permission_code, request, headers, runtime) - async def list_operation_logs_async( + async def get_permission_async( self, workspace_id: str, - request: aiwork_space_20210204_models.ListOperationLogsRequest, - ) -> aiwork_space_20210204_models.ListOperationLogsResponse: + 
permission_code: str, + request: aiwork_space_20210204_models.GetPermissionRequest, + ) -> aiwork_space_20210204_models.GetPermissionResponse: + """ + @summary 获取权限,若无权限则返回错误 + + @param request: GetPermissionRequest + @return: GetPermissionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_operation_logs_with_options_async(workspace_id, request, headers, runtime) + return await self.get_permission_with_options_async(workspace_id, permission_code, request, headers, runtime) - def list_permissions_with_options( + def get_resource_with_options( self, + resource_id: str, workspace_id: str, + request: aiwork_space_20210204_models.GetResourceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListPermissionsResponse: + ) -> aiwork_space_20210204_models.GetResourceResponse: + """ + @summary 获取工作空间资源 + + @param request: GetResourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.resource_type): + query['ResourceType'] = request.resource_type req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListPermissions', + action='GetResource', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/resources/{OpenApiUtilClient.get_encode_param(resource_id)}', method='GET', auth_type='AK', style='ROA', @@ -5113,24 +7574,39 @@ def list_permissions_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListPermissionsResponse(), + aiwork_space_20210204_models.GetResourceResponse(), self.call_api(params, req, runtime) ) - 
async def list_permissions_with_options_async( + async def get_resource_with_options_async( self, + resource_id: str, workspace_id: str, + request: aiwork_space_20210204_models.GetResourceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListPermissionsResponse: + ) -> aiwork_space_20210204_models.GetResourceResponse: + """ + @summary 获取工作空间资源 + + @param request: GetResourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.resource_type): + query['ResourceType'] = request.resource_type req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListPermissions', + action='GetResource', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/resources/{OpenApiUtilClient.get_encode_param(resource_id)}', method='GET', auth_type='AK', style='ROA', @@ -5138,45 +7614,69 @@ async def list_permissions_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListPermissionsResponse(), + aiwork_space_20210204_models.GetResourceResponse(), await self.call_api_async(params, req, runtime) ) - def list_permissions( + def get_resource( self, + resource_id: str, workspace_id: str, - ) -> aiwork_space_20210204_models.ListPermissionsResponse: + request: aiwork_space_20210204_models.GetResourceRequest, + ) -> aiwork_space_20210204_models.GetResourceResponse: + """ + @summary 获取工作空间资源 + + @param request: GetResourceRequest + @return: GetResourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_permissions_with_options(workspace_id, 
headers, runtime) + return self.get_resource_with_options(resource_id, workspace_id, request, headers, runtime) - async def list_permissions_async( + async def get_resource_async( self, + resource_id: str, workspace_id: str, - ) -> aiwork_space_20210204_models.ListPermissionsResponse: + request: aiwork_space_20210204_models.GetResourceRequest, + ) -> aiwork_space_20210204_models.GetResourceResponse: + """ + @summary 获取工作空间资源 + + @param request: GetResourceRequest + @return: GetResourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_permissions_with_options_async(workspace_id, headers, runtime) + return await self.get_resource_with_options_async(resource_id, workspace_id, request, headers, runtime) - def list_product_authorizations_with_options( + def get_role_statistics_with_options( self, - request: aiwork_space_20210204_models.ListProductAuthorizationsRequest, + request: aiwork_space_20210204_models.GetRoleStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListProductAuthorizationsResponse: + ) -> aiwork_space_20210204_models.GetRoleStatisticsResponse: + """ + @summary 获得角色统计 + + @param request: GetRoleStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetRoleStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.ram_role_names): - query['RamRoleNames'] = request.ram_role_names + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListProductAuthorizations', + action='GetRoleStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/productauthorizations', + pathname=f'/api/v1/statistics/roles', method='GET', auth_type='AK', style='ROA', @@ 
-5184,29 +7684,37 @@ def list_product_authorizations_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListProductAuthorizationsResponse(), + aiwork_space_20210204_models.GetRoleStatisticsResponse(), self.call_api(params, req, runtime) ) - async def list_product_authorizations_with_options_async( + async def get_role_statistics_with_options_async( self, - request: aiwork_space_20210204_models.ListProductAuthorizationsRequest, + request: aiwork_space_20210204_models.GetRoleStatisticsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListProductAuthorizationsResponse: + ) -> aiwork_space_20210204_models.GetRoleStatisticsResponse: + """ + @summary 获得角色统计 + + @param request: GetRoleStatisticsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetRoleStatisticsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.ram_role_names): - query['RamRoleNames'] = request.ram_role_names + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListProductAuthorizations', + action='GetRoleStatistics', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/productauthorizations', + pathname=f'/api/v1/statistics/roles', method='GET', auth_type='AK', style='ROA', @@ -5214,49 +7722,59 @@ async def list_product_authorizations_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListProductAuthorizationsResponse(), + aiwork_space_20210204_models.GetRoleStatisticsResponse(), await self.call_api_async(params, req, runtime) ) - def list_product_authorizations( + def get_role_statistics( self, - request: aiwork_space_20210204_models.ListProductAuthorizationsRequest, - ) -> 
aiwork_space_20210204_models.ListProductAuthorizationsResponse: + request: aiwork_space_20210204_models.GetRoleStatisticsRequest, + ) -> aiwork_space_20210204_models.GetRoleStatisticsResponse: + """ + @summary 获得角色统计 + + @param request: GetRoleStatisticsRequest + @return: GetRoleStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_product_authorizations_with_options(request, headers, runtime) + return self.get_role_statistics_with_options(request, headers, runtime) - async def list_product_authorizations_async( + async def get_role_statistics_async( self, - request: aiwork_space_20210204_models.ListProductAuthorizationsRequest, - ) -> aiwork_space_20210204_models.ListProductAuthorizationsResponse: + request: aiwork_space_20210204_models.GetRoleStatisticsRequest, + ) -> aiwork_space_20210204_models.GetRoleStatisticsResponse: + """ + @summary 获得角色统计 + + @param request: GetRoleStatisticsRequest + @return: GetRoleStatisticsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_product_authorizations_with_options_async(request, headers, runtime) + return await self.get_role_statistics_with_options_async(request, headers, runtime) - def list_products_with_options( + def get_service_template_with_options( self, - request: aiwork_space_20210204_models.ListProductsRequest, + service_template_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListProductsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.product_codes): - query['ProductCodes'] = request.product_codes - if not UtilClient.is_unset(request.service_codes): - query['ServiceCodes'] = request.service_codes - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose + ) -> aiwork_space_20210204_models.GetServiceTemplateResponse: + """ + @summary 获取服务模版 + + @param headers: map + @param runtime: runtime options for 
this request RuntimeOptions + @return: GetServiceTemplateResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListProducts', + action='GetServiceTemplate', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/products', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}', method='GET', auth_type='AK', style='ROA', @@ -5264,33 +7782,31 @@ def list_products_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListProductsResponse(), + aiwork_space_20210204_models.GetServiceTemplateResponse(), self.call_api(params, req, runtime) ) - async def list_products_with_options_async( + async def get_service_template_with_options_async( self, - request: aiwork_space_20210204_models.ListProductsRequest, + service_template_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListProductsResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.product_codes): - query['ProductCodes'] = request.product_codes - if not UtilClient.is_unset(request.service_codes): - query['ServiceCodes'] = request.service_codes - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose + ) -> aiwork_space_20210204_models.GetServiceTemplateResponse: + """ + @summary 获取服务模版 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetServiceTemplateResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListProducts', + action='GetServiceTemplate', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/products', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}', 
method='GET', auth_type='AK', style='ROA', @@ -5298,45 +7814,57 @@ async def list_products_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListProductsResponse(), + aiwork_space_20210204_models.GetServiceTemplateResponse(), await self.call_api_async(params, req, runtime) ) - def list_products( + def get_service_template( self, - request: aiwork_space_20210204_models.ListProductsRequest, - ) -> aiwork_space_20210204_models.ListProductsResponse: + service_template_id: str, + ) -> aiwork_space_20210204_models.GetServiceTemplateResponse: + """ + @summary 获取服务模版 + + @return: GetServiceTemplateResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_products_with_options(request, headers, runtime) + return self.get_service_template_with_options(service_template_id, headers, runtime) - async def list_products_async( + async def get_service_template_async( self, - request: aiwork_space_20210204_models.ListProductsRequest, - ) -> aiwork_space_20210204_models.ListProductsResponse: + service_template_id: str, + ) -> aiwork_space_20210204_models.GetServiceTemplateResponse: + """ + @summary 获取服务模版 + + @return: GetServiceTemplateResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_products_with_options_async(request, headers, runtime) + return await self.get_service_template_with_options_async(service_template_id, headers, runtime) - def list_quotas_with_options( + def get_trial_with_options( self, - request: aiwork_space_20210204_models.ListQuotasRequest, + trial_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListQuotasResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.name): - query['Name'] = request.name + ) -> aiwork_space_20210204_models.GetTrialResponse: + """ + @summary Get trial + + @param headers: map + @param runtime: runtime options for this request 
RuntimeOptions + @return: GetTrialResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListQuotas', + action='GetTrial', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/quotas', + pathname=f'/api/v1/trials/{OpenApiUtilClient.get_encode_param(trial_id)}', method='GET', auth_type='AK', style='ROA', @@ -5344,29 +7872,31 @@ def list_quotas_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListQuotasResponse(), + aiwork_space_20210204_models.GetTrialResponse(), self.call_api(params, req, runtime) ) - async def list_quotas_with_options_async( + async def get_trial_with_options_async( self, - request: aiwork_space_20210204_models.ListQuotasRequest, + trial_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListQuotasResponse: - UtilClient.validate_model(request) - query = {} - if not UtilClient.is_unset(request.name): - query['Name'] = request.name + ) -> aiwork_space_20210204_models.GetTrialResponse: + """ + @summary Get trial + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetTrialResponse + """ req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers ) params = open_api_models.Params( - action='ListQuotas', + action='GetTrial', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/quotas', + pathname=f'/api/v1/trials/{OpenApiUtilClient.get_encode_param(trial_id)}', method='GET', auth_type='AK', style='ROA', @@ -5374,61 +7904,64 @@ async def list_quotas_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListQuotasResponse(), + aiwork_space_20210204_models.GetTrialResponse(), await self.call_api_async(params, req, runtime) ) - def list_quotas( + def get_trial( self, - request: 
aiwork_space_20210204_models.ListQuotasRequest, - ) -> aiwork_space_20210204_models.ListQuotasResponse: + trial_id: str, + ) -> aiwork_space_20210204_models.GetTrialResponse: + """ + @summary Get trial + + @return: GetTrialResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_quotas_with_options(request, headers, runtime) + return self.get_trial_with_options(trial_id, headers, runtime) - async def list_quotas_async( + async def get_trial_async( self, - request: aiwork_space_20210204_models.ListQuotasRequest, - ) -> aiwork_space_20210204_models.ListQuotasResponse: + trial_id: str, + ) -> aiwork_space_20210204_models.GetTrialResponse: + """ + @summary Get trial + + @return: GetTrialResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_quotas_with_options_async(request, headers, runtime) + return await self.get_trial_with_options_async(trial_id, headers, runtime) - def list_resources_with_options( + def get_workspace_with_options( self, - request: aiwork_space_20210204_models.ListResourcesRequest, + workspace_id: str, + request: aiwork_space_20210204_models.GetWorkspaceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListResourcesResponse: + ) -> aiwork_space_20210204_models.GetWorkspaceResponse: + """ + @summary 获取工作空间 + + @param request: GetWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetWorkspaceResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.group_name): - query['GroupName'] = request.group_name - if not UtilClient.is_unset(request.option): - query['Option'] = request.option - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.product_types): - 
query['ProductTypes'] = request.product_types - if not UtilClient.is_unset(request.resource_name): - query['ResourceName'] = request.resource_name - if not UtilClient.is_unset(request.resource_types): - query['ResourceTypes'] = request.resource_types if not UtilClient.is_unset(request.verbose): query['Verbose'] = request.verbose - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListResources', + action='GetWorkspace', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/resources', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}', method='GET', auth_type='AK', style='ROA', @@ -5436,45 +7969,38 @@ def list_resources_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListResourcesResponse(), + aiwork_space_20210204_models.GetWorkspaceResponse(), self.call_api(params, req, runtime) ) - async def list_resources_with_options_async( + async def get_workspace_with_options_async( self, - request: aiwork_space_20210204_models.ListResourcesRequest, + workspace_id: str, + request: aiwork_space_20210204_models.GetWorkspaceRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListResourcesResponse: + ) -> aiwork_space_20210204_models.GetWorkspaceResponse: + """ + @summary 获取工作空间 + + @param request: GetWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetWorkspaceResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.group_name): - query['GroupName'] = request.group_name - if not UtilClient.is_unset(request.option): - query['Option'] = request.option - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not 
UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.product_types): - query['ProductTypes'] = request.product_types - if not UtilClient.is_unset(request.resource_name): - query['ResourceName'] = request.resource_name - if not UtilClient.is_unset(request.resource_types): - query['ResourceTypes'] = request.resource_types if not UtilClient.is_unset(request.verbose): query['Verbose'] = request.verbose - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListResources', + action='GetWorkspace', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/resources', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}', method='GET', auth_type='AK', style='ROA', @@ -5482,53 +8008,171 @@ async def list_resources_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListResourcesResponse(), + aiwork_space_20210204_models.GetWorkspaceResponse(), await self.call_api_async(params, req, runtime) ) - def list_resources( + def get_workspace( self, - request: aiwork_space_20210204_models.ListResourcesRequest, - ) -> aiwork_space_20210204_models.ListResourcesResponse: + workspace_id: str, + request: aiwork_space_20210204_models.GetWorkspaceRequest, + ) -> aiwork_space_20210204_models.GetWorkspaceResponse: + """ + @summary 获取工作空间 + + @param request: GetWorkspaceRequest + @return: GetWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_resources_with_options(request, headers, runtime) + return self.get_workspace_with_options(workspace_id, request, headers, runtime) - async def list_resources_async( + async def get_workspace_async( self, - request: aiwork_space_20210204_models.ListResourcesRequest, - ) -> 
aiwork_space_20210204_models.ListResourcesResponse: + workspace_id: str, + request: aiwork_space_20210204_models.GetWorkspaceRequest, + ) -> aiwork_space_20210204_models.GetWorkspaceResponse: + """ + @summary 获取工作空间 + + @param request: GetWorkspaceRequest + @return: GetWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_resources_with_options_async(request, headers, runtime) + return await self.get_workspace_with_options_async(workspace_id, request, headers, runtime) - def list_users_with_options( + def get_workspace_role_with_options( self, - request: aiwork_space_20210204_models.ListUsersRequest, + workspace_id: str, + role_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListUsersResponse: + ) -> aiwork_space_20210204_models.GetWorkspaceRoleResponse: + """ + @summary 获取工作空间角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetWorkspaceRoleResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetWorkspaceRole', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/roles/{OpenApiUtilClient.get_encode_param(role_id)}', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.GetWorkspaceRoleResponse(), + self.call_api(params, req, runtime) + ) + + async def get_workspace_role_with_options_async( + self, + workspace_id: str, + role_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.GetWorkspaceRoleResponse: + """ + @summary 获取工作空间角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetWorkspaceRoleResponse + """ + req = open_api_models.OpenApiRequest( + 
headers=headers + ) + params = open_api_models.Params( + action='GetWorkspaceRole', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/roles/{OpenApiUtilClient.get_encode_param(role_id)}', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.GetWorkspaceRoleResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_workspace_role( + self, + workspace_id: str, + role_id: str, + ) -> aiwork_space_20210204_models.GetWorkspaceRoleResponse: + """ + @summary 获取工作空间角色 + + @return: GetWorkspaceRoleResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_workspace_role_with_options(workspace_id, role_id, headers, runtime) + + async def get_workspace_role_async( + self, + workspace_id: str, + role_id: str, + ) -> aiwork_space_20210204_models.GetWorkspaceRoleResponse: + """ + @summary 获取工作空间角色 + + @return: GetWorkspaceRoleResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_workspace_role_with_options_async(workspace_id, role_id, headers, runtime) + + def list_code_sources_with_options( + self, + request: aiwork_space_20210204_models.ListCodeSourcesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListCodeSourcesResponse: + """ + @summary 获取代码源配置列表 + + @param request: ListCodeSourcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListCodeSourcesResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.account_types): - query['AccountTypes'] = request.account_types + if not UtilClient.is_unset(request.display_name): + query['DisplayName'] = request.display_name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order if not 
UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.user_ids): - query['UserIds'] = request.user_ids - if not UtilClient.is_unset(request.user_name): - query['UserName'] = request.user_name + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListUsers', + action='ListCodeSources', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/users', + pathname=f'/api/v1/codesources', method='GET', auth_type='AK', style='ROA', @@ -5536,37 +8180,47 @@ def list_users_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListUsersResponse(), + aiwork_space_20210204_models.ListCodeSourcesResponse(), self.call_api(params, req, runtime) ) - async def list_users_with_options_async( + async def list_code_sources_with_options_async( self, - request: aiwork_space_20210204_models.ListUsersRequest, + request: aiwork_space_20210204_models.ListCodeSourcesRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListUsersResponse: + ) -> aiwork_space_20210204_models.ListCodeSourcesResponse: + """ + @summary 获取代码源配置列表 + + @param request: ListCodeSourcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListCodeSourcesResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.account_types): - query['AccountTypes'] = request.account_types + if not UtilClient.is_unset(request.display_name): + query['DisplayName'] = request.display_name + if not UtilClient.is_unset(request.order): + query['Order'] 
= request.order if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.user_ids): - query['UserIds'] = request.user_ids - if not UtilClient.is_unset(request.user_name): - query['UserName'] = request.user_name + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListUsers', + action='ListCodeSources', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/users', + pathname=f'/api/v1/codesources', method='GET', auth_type='AK', style='ROA', @@ -5574,40 +8228,67 @@ async def list_users_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListUsersResponse(), + aiwork_space_20210204_models.ListCodeSourcesResponse(), await self.call_api_async(params, req, runtime) ) - def list_users( + def list_code_sources( self, - request: aiwork_space_20210204_models.ListUsersRequest, - ) -> aiwork_space_20210204_models.ListUsersResponse: + request: aiwork_space_20210204_models.ListCodeSourcesRequest, + ) -> aiwork_space_20210204_models.ListCodeSourcesResponse: + """ + @summary 获取代码源配置列表 + + @param request: ListCodeSourcesRequest + @return: ListCodeSourcesResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_users_with_options(request, headers, runtime) + return self.list_code_sources_with_options(request, headers, runtime) - async def list_users_async( + async def list_code_sources_async( self, - request: aiwork_space_20210204_models.ListUsersRequest, - ) -> aiwork_space_20210204_models.ListUsersResponse: + request: aiwork_space_20210204_models.ListCodeSourcesRequest, + ) -> 
aiwork_space_20210204_models.ListCodeSourcesResponse: + """ + @summary 获取代码源配置列表 + + @param request: ListCodeSourcesRequest + @return: ListCodeSourcesResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_users_with_options_async(request, headers, runtime) + return await self.list_code_sources_with_options_async(request, headers, runtime) - def list_workspace_users_with_options( + def list_collections_with_options( self, - workspace_id: str, + request: aiwork_space_20210204_models.ListCollectionsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListWorkspaceUsersResponse: + ) -> aiwork_space_20210204_models.ListCollectionsResponse: + """ + @summary 获取Collection列表 + + @param request: ListCollectionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListCollectionsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListWorkspaceUsers', + action='ListCollections', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/users', + pathname=f'/api/v1/collections', method='GET', auth_type='AK', style='ROA', @@ -5615,24 +8296,39 @@ def list_workspace_users_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListWorkspaceUsersResponse(), + aiwork_space_20210204_models.ListCollectionsResponse(), self.call_api(params, req, runtime) ) - async def list_workspace_users_with_options_async( + async def list_collections_with_options_async( self, - workspace_id: str, + request: 
aiwork_space_20210204_models.ListCollectionsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListWorkspaceUsersResponse: + ) -> aiwork_space_20210204_models.ListCollectionsResponse: + """ + @summary 获取Collection列表 + + @param request: ListCollectionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListCollectionsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListWorkspaceUsers', + action='ListCollections', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/users', + pathname=f'/api/v1/collections', method='GET', auth_type='AK', style='ROA', @@ -5640,65 +8336,70 @@ async def list_workspace_users_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListWorkspaceUsersResponse(), + aiwork_space_20210204_models.ListCollectionsResponse(), await self.call_api_async(params, req, runtime) ) - def list_workspace_users( + def list_collections( self, - workspace_id: str, - ) -> aiwork_space_20210204_models.ListWorkspaceUsersResponse: + request: aiwork_space_20210204_models.ListCollectionsRequest, + ) -> aiwork_space_20210204_models.ListCollectionsResponse: + """ + @summary 获取Collection列表 + + @param request: ListCollectionsRequest + @return: ListCollectionsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_workspace_users_with_options(workspace_id, headers, runtime) + return self.list_collections_with_options(request, headers, runtime) - async def 
list_workspace_users_async( + async def list_collections_async( self, - workspace_id: str, - ) -> aiwork_space_20210204_models.ListWorkspaceUsersResponse: + request: aiwork_space_20210204_models.ListCollectionsRequest, + ) -> aiwork_space_20210204_models.ListCollectionsResponse: + """ + @summary 获取Collection列表 + + @param request: ListCollectionsRequest + @return: ListCollectionsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_workspace_users_with_options_async(workspace_id, headers, runtime) + return await self.list_collections_with_options_async(request, headers, runtime) - def list_workspaces_with_options( + def list_configs_with_options( self, - request: aiwork_space_20210204_models.ListWorkspacesRequest, + workspace_id: str, + request: aiwork_space_20210204_models.ListConfigsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListWorkspacesResponse: + ) -> aiwork_space_20210204_models.ListConfigsResponse: + """ + @summary 获取配置 + + @param request: ListConfigsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListConfigsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.fields): - query['Fields'] = request.fields - if not UtilClient.is_unset(request.module_list): - query['ModuleList'] = request.module_list - if not UtilClient.is_unset(request.option): - query['Option'] = request.option - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.status): - query['Status'] = request.status + if not UtilClient.is_unset(request.config_keys): + 
query['ConfigKeys'] = request.config_keys + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels if not UtilClient.is_unset(request.verbose): query['Verbose'] = request.verbose - if not UtilClient.is_unset(request.workspace_ids): - query['WorkspaceIds'] = request.workspace_ids - if not UtilClient.is_unset(request.workspace_name): - query['WorkspaceName'] = request.workspace_name req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListWorkspaces', + action='ListConfigs', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/configs', method='GET', auth_type='AK', style='ROA', @@ -5706,103 +8407,3736 @@ def list_workspaces_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListWorkspacesResponse(), + aiwork_space_20210204_models.ListConfigsResponse(), self.call_api(params, req, runtime) ) - async def list_workspaces_with_options_async( + async def list_configs_with_options_async( self, - request: aiwork_space_20210204_models.ListWorkspacesRequest, + workspace_id: str, + request: aiwork_space_20210204_models.ListConfigsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.ListWorkspacesResponse: + ) -> aiwork_space_20210204_models.ListConfigsResponse: + """ + @summary 获取配置 + + @param request: ListConfigsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListConfigsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.fields): - query['Fields'] = request.fields - if not UtilClient.is_unset(request.module_list): - query['ModuleList'] = request.module_list - if not UtilClient.is_unset(request.option): - query['Option'] = request.option - if not 
UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.status): - query['Status'] = request.status + if not UtilClient.is_unset(request.config_keys): + query['ConfigKeys'] = request.config_keys + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels if not UtilClient.is_unset(request.verbose): query['Verbose'] = request.verbose - if not UtilClient.is_unset(request.workspace_ids): - query['WorkspaceIds'] = request.workspace_ids - if not UtilClient.is_unset(request.workspace_name): - query['WorkspaceName'] = request.workspace_name req = open_api_models.OpenApiRequest( - headers=headers, - query=OpenApiUtilClient.query(query) + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListConfigs', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/configs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListConfigsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_configs( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListConfigsRequest, + ) -> aiwork_space_20210204_models.ListConfigsResponse: + """ + @summary 获取配置 + + @param request: ListConfigsRequest + @return: ListConfigsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_configs_with_options(workspace_id, request, headers, runtime) + + async def list_configs_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListConfigsRequest, + ) -> 
aiwork_space_20210204_models.ListConfigsResponse: + """ + @summary 获取配置 + + @param request: ListConfigsRequest + @return: ListConfigsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_configs_with_options_async(workspace_id, request, headers, runtime) + + def list_dataset_versions_with_options( + self, + dataset_id: str, + request: aiwork_space_20210204_models.ListDatasetVersionsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListDatasetVersionsResponse: + """ + @summary 获取数据集版本列表 + + @param request: ListDatasetVersionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListDatasetVersionsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.data_sources_types): + query['DataSourcesTypes'] = request.data_sources_types + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys + if not UtilClient.is_unset(request.lable_values): + query['LableValues'] = request.lable_values + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.properties): + query['Properties'] = request.properties + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.source_id): + query['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_types): + query['SourceTypes'] = request.source_types + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListDatasetVersions', + version='2021-02-04', + protocol='HTTPS', + 
pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListDatasetVersionsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_dataset_versions_with_options_async( + self, + dataset_id: str, + request: aiwork_space_20210204_models.ListDatasetVersionsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListDatasetVersionsResponse: + """ + @summary 获取数据集版本列表 + + @param request: ListDatasetVersionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListDatasetVersionsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.data_sources_types): + query['DataSourcesTypes'] = request.data_sources_types + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys + if not UtilClient.is_unset(request.lable_values): + query['LableValues'] = request.lable_values + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.properties): + query['Properties'] = request.properties + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.source_id): + query['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_types): + query['SourceTypes'] = request.source_types + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListDatasetVersions', + version='2021-02-04', + protocol='HTTPS', + 
pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListDatasetVersionsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_dataset_versions( + self, + dataset_id: str, + request: aiwork_space_20210204_models.ListDatasetVersionsRequest, + ) -> aiwork_space_20210204_models.ListDatasetVersionsResponse: + """ + @summary 获取数据集版本列表 + + @param request: ListDatasetVersionsRequest + @return: ListDatasetVersionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_dataset_versions_with_options(dataset_id, request, headers, runtime) + + async def list_dataset_versions_async( + self, + dataset_id: str, + request: aiwork_space_20210204_models.ListDatasetVersionsRequest, + ) -> aiwork_space_20210204_models.ListDatasetVersionsResponse: + """ + @summary 获取数据集版本列表 + + @param request: ListDatasetVersionsRequest + @return: ListDatasetVersionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_dataset_versions_with_options_async(dataset_id, request, headers, runtime) + + def list_datasets_with_options( + self, + request: aiwork_space_20210204_models.ListDatasetsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListDatasetsResponse: + """ + @summary 获取数据集列表 + + @param request: ListDatasetsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListDatasetsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.data_source_types): + query['DataSourceTypes'] = request.data_source_types + if not UtilClient.is_unset(request.data_types): + query['DataTypes'] = request.data_types + if not UtilClient.is_unset(request.label): + query['Label'] = 
request.label + if not UtilClient.is_unset(request.name): + query['Name'] = request.name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.properties): + query['Properties'] = request.properties + if not UtilClient.is_unset(request.provider): + query['Provider'] = request.provider + if not UtilClient.is_unset(request.source_dataset_id): + query['SourceDatasetId'] = request.source_dataset_id + if not UtilClient.is_unset(request.source_id): + query['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_types): + query['SourceTypes'] = request.source_types + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListDatasets', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/datasets', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListDatasetsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_datasets_with_options_async( + self, + request: aiwork_space_20210204_models.ListDatasetsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListDatasetsResponse: + """ + @summary 获取数据集列表 + + @param request: ListDatasetsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListDatasetsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.data_source_types): + query['DataSourceTypes'] = request.data_source_types + if not 
UtilClient.is_unset(request.data_types): + query['DataTypes'] = request.data_types + if not UtilClient.is_unset(request.label): + query['Label'] = request.label + if not UtilClient.is_unset(request.name): + query['Name'] = request.name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.properties): + query['Properties'] = request.properties + if not UtilClient.is_unset(request.provider): + query['Provider'] = request.provider + if not UtilClient.is_unset(request.source_dataset_id): + query['SourceDatasetId'] = request.source_dataset_id + if not UtilClient.is_unset(request.source_id): + query['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_types): + query['SourceTypes'] = request.source_types + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListDatasets', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/datasets', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListDatasetsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_datasets( + self, + request: aiwork_space_20210204_models.ListDatasetsRequest, + ) -> aiwork_space_20210204_models.ListDatasetsResponse: + """ + @summary 获取数据集列表 + + @param request: ListDatasetsRequest + @return: ListDatasetsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_datasets_with_options(request, headers, runtime) + + async def list_datasets_async( + self, + request: 
aiwork_space_20210204_models.ListDatasetsRequest, + ) -> aiwork_space_20210204_models.ListDatasetsResponse: + """ + @summary 获取数据集列表 + + @param request: ListDatasetsRequest + @return: ListDatasetsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_datasets_with_options_async(request, headers, runtime) + + def list_experiment_with_options( + self, + request: aiwork_space_20210204_models.ListExperimentRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListExperimentResponse: + """ + @summary 获取实验列表 + + @param request: ListExperimentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListExperimentResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.name): + query['Name'] = request.name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListExperiment', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/experiments', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListExperimentResponse(), + self.call_api(params, req, runtime) + ) + + async def list_experiment_with_options_async( + self, + request: 
aiwork_space_20210204_models.ListExperimentRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListExperimentResponse: + """ + @summary 获取实验列表 + + @param request: ListExperimentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListExperimentResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.name): + query['Name'] = request.name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListExperiment', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/experiments', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListExperimentResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_experiment( + self, + request: aiwork_space_20210204_models.ListExperimentRequest, + ) -> aiwork_space_20210204_models.ListExperimentResponse: + """ + @summary 获取实验列表 + + @param request: ListExperimentRequest + @return: ListExperimentResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_experiment_with_options(request, headers, runtime) + + async def list_experiment_async( + self, + request: aiwork_space_20210204_models.ListExperimentRequest, + ) 
-> aiwork_space_20210204_models.ListExperimentResponse: + """ + @summary 获取实验列表 + + @param request: ListExperimentRequest + @return: ListExperimentResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_experiment_with_options_async(request, headers, runtime) + + def list_features_with_options( + self, + request: aiwork_space_20210204_models.ListFeaturesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListFeaturesResponse: + """ + @summary 列举特性 + + @param request: ListFeaturesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListFeaturesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.names): + query['Names'] = request.names + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListFeatures', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/features', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListFeaturesResponse(), + self.call_api(params, req, runtime) + ) + + async def list_features_with_options_async( + self, + request: aiwork_space_20210204_models.ListFeaturesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListFeaturesResponse: + """ + @summary 列举特性 + + @param request: ListFeaturesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListFeaturesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.names): + query['Names'] = request.names + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + 
action='ListFeatures', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/features', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListFeaturesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_features( + self, + request: aiwork_space_20210204_models.ListFeaturesRequest, + ) -> aiwork_space_20210204_models.ListFeaturesResponse: + """ + @summary 列举特性 + + @param request: ListFeaturesRequest + @return: ListFeaturesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_features_with_options(request, headers, runtime) + + async def list_features_async( + self, + request: aiwork_space_20210204_models.ListFeaturesRequest, + ) -> aiwork_space_20210204_models.ListFeaturesResponse: + """ + @summary 列举特性 + + @param request: ListFeaturesRequest + @return: ListFeaturesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_features_with_options_async(request, headers, runtime) + + def list_global_permissions_with_options( + self, + request: aiwork_space_20210204_models.ListGlobalPermissionsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListGlobalPermissionsResponse: + """ + @summary 获取用户全局权限 + + @param request: ListGlobalPermissionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListGlobalPermissionsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.module_names): + query['ModuleNames'] = request.module_names + if not UtilClient.is_unset(request.operation_type): + query['OperationType'] = request.operation_type + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = 
request.page_size + if not UtilClient.is_unset(request.resource_types): + query['ResourceTypes'] = request.resource_types + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListGlobalPermissions', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/permissions', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListGlobalPermissionsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_global_permissions_with_options_async( + self, + request: aiwork_space_20210204_models.ListGlobalPermissionsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListGlobalPermissionsResponse: + """ + @summary 获取用户全局权限 + + @param request: ListGlobalPermissionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListGlobalPermissionsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.module_names): + query['ModuleNames'] = request.module_names + if not UtilClient.is_unset(request.operation_type): + query['OperationType'] = request.operation_type + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.resource_types): + query['ResourceTypes'] = request.resource_types + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListGlobalPermissions', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/permissions', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + 
aiwork_space_20210204_models.ListGlobalPermissionsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_global_permissions( + self, + request: aiwork_space_20210204_models.ListGlobalPermissionsRequest, + ) -> aiwork_space_20210204_models.ListGlobalPermissionsResponse: + """ + @summary 获取用户全局权限 + + @param request: ListGlobalPermissionsRequest + @return: ListGlobalPermissionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_global_permissions_with_options(request, headers, runtime) + + async def list_global_permissions_async( + self, + request: aiwork_space_20210204_models.ListGlobalPermissionsRequest, + ) -> aiwork_space_20210204_models.ListGlobalPermissionsResponse: + """ + @summary 获取用户全局权限 + + @param request: ListGlobalPermissionsRequest + @return: ListGlobalPermissionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_global_permissions_with_options_async(request, headers, runtime) + + def list_image_label_keys_with_options( + self, + request: aiwork_space_20210204_models.ListImageLabelKeysRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListImageLabelKeysResponse: + """ + @summary 列举匹配标签前缀的所有标签 + + @param request: ListImageLabelKeysRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListImageLabelKeysResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.label_key_prefixes): + query['LabelKeyPrefixes'] = request.label_key_prefixes + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListImageLabelKeys', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/image/labelkeys', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return 
TeaCore.from_map( + aiwork_space_20210204_models.ListImageLabelKeysResponse(), + self.call_api(params, req, runtime) + ) + + async def list_image_label_keys_with_options_async( + self, + request: aiwork_space_20210204_models.ListImageLabelKeysRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListImageLabelKeysResponse: + """ + @summary 列举匹配标签前缀的所有标签 + + @param request: ListImageLabelKeysRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListImageLabelKeysResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.label_key_prefixes): + query['LabelKeyPrefixes'] = request.label_key_prefixes + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListImageLabelKeys', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/image/labelkeys', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListImageLabelKeysResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_image_label_keys( + self, + request: aiwork_space_20210204_models.ListImageLabelKeysRequest, + ) -> aiwork_space_20210204_models.ListImageLabelKeysResponse: + """ + @summary 列举匹配标签前缀的所有标签 + + @param request: ListImageLabelKeysRequest + @return: ListImageLabelKeysResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_image_label_keys_with_options(request, headers, runtime) + + async def list_image_label_keys_async( + self, + request: aiwork_space_20210204_models.ListImageLabelKeysRequest, + ) -> aiwork_space_20210204_models.ListImageLabelKeysResponse: + """ + @summary 列举匹配标签前缀的所有标签 + + @param request: ListImageLabelKeysRequest + @return: ListImageLabelKeysResponse + """ + runtime = 
util_models.RuntimeOptions() + headers = {} + return await self.list_image_label_keys_with_options_async(request, headers, runtime) + + def list_image_labels_with_options( + self, + request: aiwork_space_20210204_models.ListImageLabelsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListImageLabelsResponse: + """ + @summary 列举标签 + + @param request: ListImageLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListImageLabelsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.image_id): + query['ImageId'] = request.image_id + if not UtilClient.is_unset(request.label_filter): + query['LabelFilter'] = request.label_filter + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys + if not UtilClient.is_unset(request.region): + query['Region'] = request.region + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListImageLabels', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/image/labels', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListImageLabelsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_image_labels_with_options_async( + self, + request: aiwork_space_20210204_models.ListImageLabelsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListImageLabelsResponse: + """ + @summary 列举标签 + + @param request: ListImageLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListImageLabelsResponse + """ + 
UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.image_id): + query['ImageId'] = request.image_id + if not UtilClient.is_unset(request.label_filter): + query['LabelFilter'] = request.label_filter + if not UtilClient.is_unset(request.label_keys): + query['LabelKeys'] = request.label_keys + if not UtilClient.is_unset(request.region): + query['Region'] = request.region + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListImageLabels', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/image/labels', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListImageLabelsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_image_labels( + self, + request: aiwork_space_20210204_models.ListImageLabelsRequest, + ) -> aiwork_space_20210204_models.ListImageLabelsResponse: + """ + @summary 列举标签 + + @param request: ListImageLabelsRequest + @return: ListImageLabelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_image_labels_with_options(request, headers, runtime) + + async def list_image_labels_async( + self, + request: aiwork_space_20210204_models.ListImageLabelsRequest, + ) -> aiwork_space_20210204_models.ListImageLabelsResponse: + """ + @summary 列举标签 + + @param request: ListImageLabelsRequest + @return: ListImageLabelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_image_labels_with_options_async(request, headers, runtime) + + def list_images_with_options( + self, + request: aiwork_space_20210204_models.ListImagesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> 
aiwork_space_20210204_models.ListImagesResponse: + """ + @summary 列举已注册镜像 + + @param request: ListImagesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListImagesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.accessibility): + query['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.name): + query['Name'] = request.name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.parent_user_id): + query['ParentUserId'] = request.parent_user_id + if not UtilClient.is_unset(request.query): + query['Query'] = request.query + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.user_id): + query['UserId'] = request.user_id + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListImages', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/images', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListImagesResponse(), + self.call_api(params, req, runtime) + ) + + async def list_images_with_options_async( + self, + request: aiwork_space_20210204_models.ListImagesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> 
aiwork_space_20210204_models.ListImagesResponse: + """ + @summary 列举已注册镜像 + + @param request: ListImagesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListImagesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.accessibility): + query['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.name): + query['Name'] = request.name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.parent_user_id): + query['ParentUserId'] = request.parent_user_id + if not UtilClient.is_unset(request.query): + query['Query'] = request.query + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.user_id): + query['UserId'] = request.user_id + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListImages', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/images', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListImagesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_images( + self, + request: aiwork_space_20210204_models.ListImagesRequest, + ) -> aiwork_space_20210204_models.ListImagesResponse: + """ + @summary 列举已注册镜像 + + @param request: 
ListImagesRequest + @return: ListImagesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_images_with_options(request, headers, runtime) + + async def list_images_async( + self, + request: aiwork_space_20210204_models.ListImagesRequest, + ) -> aiwork_space_20210204_models.ListImagesResponse: + """ + @summary 列举已注册镜像 + + @param request: ListImagesRequest + @return: ListImagesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_images_with_options_async(request, headers, runtime) + + def list_members_with_options( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListMembersRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListMembersResponse: + """ + @summary 列举工作空间成员 + + @param request: ListMembersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListMembersResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.member_name): + query['MemberName'] = request.member_name + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.roles): + query['Roles'] = request.roles + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListMembers', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListMembersResponse(), + self.call_api(params, req, runtime) + ) + + async def list_members_with_options_async( + self, + workspace_id: 
str, + request: aiwork_space_20210204_models.ListMembersRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListMembersResponse: + """ + @summary 列举工作空间成员 + + @param request: ListMembersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListMembersResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.member_name): + query['MemberName'] = request.member_name + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.roles): + query['Roles'] = request.roles + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListMembers', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListMembersResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_members( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListMembersRequest, + ) -> aiwork_space_20210204_models.ListMembersResponse: + """ + @summary 列举工作空间成员 + + @param request: ListMembersRequest + @return: ListMembersResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_members_with_options(workspace_id, request, headers, runtime) + + async def list_members_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListMembersRequest, + ) -> aiwork_space_20210204_models.ListMembersResponse: + """ + @summary 列举工作空间成员 + + @param request: ListMembersRequest + @return: ListMembersResponse + 
""" + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_members_with_options_async(workspace_id, request, headers, runtime) + + def list_model_domains_with_options( + self, + request: aiwork_space_20210204_models.ListModelDomainsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListModelDomainsResponse: + """ + @summary 获取模型领域列表 + + @param request: ListModelDomainsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListModelDomainsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.model_domain_ids): + query['ModelDomainIds'] = request.model_domain_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListModelDomains', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/modeldomains', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListModelDomainsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_model_domains_with_options_async( + self, + request: aiwork_space_20210204_models.ListModelDomainsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListModelDomainsResponse: + """ + @summary 获取模型领域列表 + + @param request: ListModelDomainsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListModelDomainsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.model_domain_ids): + query['ModelDomainIds'] = request.model_domain_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + 
action='ListModelDomains', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/modeldomains', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListModelDomainsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_model_domains( + self, + request: aiwork_space_20210204_models.ListModelDomainsRequest, + ) -> aiwork_space_20210204_models.ListModelDomainsResponse: + """ + @summary 获取模型领域列表 + + @param request: ListModelDomainsRequest + @return: ListModelDomainsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_model_domains_with_options(request, headers, runtime) + + async def list_model_domains_async( + self, + request: aiwork_space_20210204_models.ListModelDomainsRequest, + ) -> aiwork_space_20210204_models.ListModelDomainsResponse: + """ + @summary 获取模型领域列表 + + @param request: ListModelDomainsRequest + @return: ListModelDomainsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_model_domains_with_options_async(request, headers, runtime) + + def list_model_versions_with_options( + self, + model_id: str, + request: aiwork_space_20210204_models.ListModelVersionsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListModelVersionsResponse: + """ + @summary 获取模型版本列表 + + @param request: ListModelVersionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListModelVersionsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.approval_status): + query['ApprovalStatus'] = request.approval_status + if not UtilClient.is_unset(request.format_type): + query['FormatType'] = request.format_type + if not UtilClient.is_unset(request.framework_type): + query['FrameworkType'] = request.framework_type + if 
not UtilClient.is_unset(request.label): + query['Label'] = request.label + if not UtilClient.is_unset(request.label_string): + query['LabelString'] = request.label_string + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.source_id): + query['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_type): + query['SourceType'] = request.source_type + if not UtilClient.is_unset(request.version_name): + query['VersionName'] = request.version_name + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListModelVersions', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListModelVersionsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_model_versions_with_options_async( + self, + model_id: str, + request: aiwork_space_20210204_models.ListModelVersionsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListModelVersionsResponse: + """ + @summary 获取模型版本列表 + + @param request: ListModelVersionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListModelVersionsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.approval_status): + 
query['ApprovalStatus'] = request.approval_status + if not UtilClient.is_unset(request.format_type): + query['FormatType'] = request.format_type + if not UtilClient.is_unset(request.framework_type): + query['FrameworkType'] = request.framework_type + if not UtilClient.is_unset(request.label): + query['Label'] = request.label + if not UtilClient.is_unset(request.label_string): + query['LabelString'] = request.label_string + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.source_id): + query['SourceId'] = request.source_id + if not UtilClient.is_unset(request.source_type): + query['SourceType'] = request.source_type + if not UtilClient.is_unset(request.version_name): + query['VersionName'] = request.version_name + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListModelVersions', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/models/{OpenApiUtilClient.get_encode_param(model_id)}/versions', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListModelVersionsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_model_versions( + self, + model_id: str, + request: aiwork_space_20210204_models.ListModelVersionsRequest, + ) -> aiwork_space_20210204_models.ListModelVersionsResponse: + """ + @summary 获取模型版本列表 + + @param request: ListModelVersionsRequest + @return: ListModelVersionsResponse + """ + runtime = 
util_models.RuntimeOptions() + headers = {} + return self.list_model_versions_with_options(model_id, request, headers, runtime) + + async def list_model_versions_async( + self, + model_id: str, + request: aiwork_space_20210204_models.ListModelVersionsRequest, + ) -> aiwork_space_20210204_models.ListModelVersionsResponse: + """ + @summary 获取模型版本列表 + + @param request: ListModelVersionsRequest + @return: ListModelVersionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_model_versions_with_options_async(model_id, request, headers, runtime) + + def list_models_with_options( + self, + request: aiwork_space_20210204_models.ListModelsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListModelsResponse: + """ + @summary 获取模型列表 + + @param request: ListModelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListModelsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.collections): + query['Collections'] = request.collections + if not UtilClient.is_unset(request.domain): + query['Domain'] = request.domain + if not UtilClient.is_unset(request.label): + query['Label'] = request.label + if not UtilClient.is_unset(request.label_string): + query['LabelString'] = request.label_string + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.model_name): + query['ModelName'] = request.model_name + if not UtilClient.is_unset(request.model_type): + query['ModelType'] = request.model_type + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.origin): + query['Origin'] = request.origin + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = 
request.page_size + if not UtilClient.is_unset(request.provider): + query['Provider'] = request.provider + if not UtilClient.is_unset(request.query): + query['Query'] = request.query + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.task): + query['Task'] = request.task + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListModels', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/models', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListModelsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_models_with_options_async( + self, + request: aiwork_space_20210204_models.ListModelsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListModelsResponse: + """ + @summary 获取模型列表 + + @param request: ListModelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListModelsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.collections): + query['Collections'] = request.collections + if not UtilClient.is_unset(request.domain): + query['Domain'] = request.domain + if not UtilClient.is_unset(request.label): + query['Label'] = request.label + if not UtilClient.is_unset(request.label_string): + query['LabelString'] = request.label_string + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.model_name): + query['ModelName'] = request.model_name + if not UtilClient.is_unset(request.model_type): + query['ModelType'] = request.model_type + if not 
UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.origin): + query['Origin'] = request.origin + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.provider): + query['Provider'] = request.provider + if not UtilClient.is_unset(request.query): + query['Query'] = request.query + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.task): + query['Task'] = request.task + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListModels', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/models', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListModelsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_models( + self, + request: aiwork_space_20210204_models.ListModelsRequest, + ) -> aiwork_space_20210204_models.ListModelsResponse: + """ + @summary 获取模型列表 + + @param request: ListModelsRequest + @return: ListModelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_models_with_options(request, headers, runtime) + + async def list_models_async( + self, + request: aiwork_space_20210204_models.ListModelsRequest, + ) -> aiwork_space_20210204_models.ListModelsResponse: + """ + @summary 获取模型列表 + + @param request: ListModelsRequest + @return: ListModelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_models_with_options_async(request, headers, runtime) + + def 
list_module_configs_with_options( + self, + request: aiwork_space_20210204_models.ListModuleConfigsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListModuleConfigsResponse: + """ + @summary 列举PAI云产品的配置 + + @param request: ListModuleConfigsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListModuleConfigsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.module_codes): + query['ModuleCodes'] = request.module_codes + if not UtilClient.is_unset(request.region): + query['Region'] = request.region + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListModuleConfigs', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/moduleconfigs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListModuleConfigsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_module_configs_with_options_async( + self, + request: aiwork_space_20210204_models.ListModuleConfigsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListModuleConfigsResponse: + """ + @summary 列举PAI云产品的配置 + + @param request: ListModuleConfigsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListModuleConfigsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.module_codes): + query['ModuleCodes'] = request.module_codes + if not UtilClient.is_unset(request.region): + query['Region'] = request.region + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + 
action='ListModuleConfigs', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/moduleconfigs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListModuleConfigsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_module_configs( + self, + request: aiwork_space_20210204_models.ListModuleConfigsRequest, + ) -> aiwork_space_20210204_models.ListModuleConfigsResponse: + """ + @summary 列举PAI云产品的配置 + + @param request: ListModuleConfigsRequest + @return: ListModuleConfigsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_module_configs_with_options(request, headers, runtime) + + async def list_module_configs_async( + self, + request: aiwork_space_20210204_models.ListModuleConfigsRequest, + ) -> aiwork_space_20210204_models.ListModuleConfigsResponse: + """ + @summary 列举PAI云产品的配置 + + @param request: ListModuleConfigsRequest + @return: ListModuleConfigsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_module_configs_with_options_async(request, headers, runtime) + + def list_operation_logs_with_options( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListOperationLogsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListOperationLogsResponse: + """ + @summary 列出操作日志 + + @param request: ListOperationLogsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListOperationLogsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.entity_status): + query['EntityStatus'] = request.entity_status + if not UtilClient.is_unset(request.entity_types): + query['EntityTypes'] = request.entity_types + if not UtilClient.is_unset(request.operations): + query['Operations'] = 
request.operations + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListOperationLogs', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/logs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListOperationLogsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_operation_logs_with_options_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListOperationLogsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListOperationLogsResponse: + """ + @summary 列出操作日志 + + @param request: ListOperationLogsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListOperationLogsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.entity_status): + query['EntityStatus'] = request.entity_status + if not UtilClient.is_unset(request.entity_types): + query['EntityTypes'] = request.entity_types + if not UtilClient.is_unset(request.operations): + query['Operations'] = request.operations + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not 
UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListOperationLogs', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/logs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListOperationLogsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_operation_logs( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListOperationLogsRequest, + ) -> aiwork_space_20210204_models.ListOperationLogsResponse: + """ + @summary 列出操作日志 + + @param request: ListOperationLogsRequest + @return: ListOperationLogsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_operation_logs_with_options(workspace_id, request, headers, runtime) + + async def list_operation_logs_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListOperationLogsRequest, + ) -> aiwork_space_20210204_models.ListOperationLogsResponse: + """ + @summary 列出操作日志 + + @param request: ListOperationLogsRequest + @return: ListOperationLogsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_operation_logs_with_options_async(workspace_id, request, headers, runtime) + + def list_permissions_with_options( + self, + workspace_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListPermissionsResponse: + """ + @summary 列举权限 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListPermissionsResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='ListPermissions', + 
version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListPermissionsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_permissions_with_options_async( + self, + workspace_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListPermissionsResponse: + """ + @summary 列举权限 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListPermissionsResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='ListPermissions', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListPermissionsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_permissions( + self, + workspace_id: str, + ) -> aiwork_space_20210204_models.ListPermissionsResponse: + """ + @summary 列举权限 + + @return: ListPermissionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_permissions_with_options(workspace_id, headers, runtime) + + async def list_permissions_async( + self, + workspace_id: str, + ) -> aiwork_space_20210204_models.ListPermissionsResponse: + """ + @summary 列举权限 + + @return: ListPermissionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_permissions_with_options_async(workspace_id, headers, runtime) + + def list_product_authorizations_with_options( + self, + request: 
aiwork_space_20210204_models.ListProductAuthorizationsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListProductAuthorizationsResponse: + """ + @summary 获取产品授权 + + @param request: ListProductAuthorizationsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListProductAuthorizationsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.ram_role_names): + query['RamRoleNames'] = request.ram_role_names + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListProductAuthorizations', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/productauthorizations', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListProductAuthorizationsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_product_authorizations_with_options_async( + self, + request: aiwork_space_20210204_models.ListProductAuthorizationsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListProductAuthorizationsResponse: + """ + @summary 获取产品授权 + + @param request: ListProductAuthorizationsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListProductAuthorizationsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.ram_role_names): + query['RamRoleNames'] = request.ram_role_names + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListProductAuthorizations', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/productauthorizations', + method='GET', + 
auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListProductAuthorizationsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_product_authorizations( + self, + request: aiwork_space_20210204_models.ListProductAuthorizationsRequest, + ) -> aiwork_space_20210204_models.ListProductAuthorizationsResponse: + """ + @summary 获取产品授权 + + @param request: ListProductAuthorizationsRequest + @return: ListProductAuthorizationsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_product_authorizations_with_options(request, headers, runtime) + + async def list_product_authorizations_async( + self, + request: aiwork_space_20210204_models.ListProductAuthorizationsRequest, + ) -> aiwork_space_20210204_models.ListProductAuthorizationsResponse: + """ + @summary 获取产品授权 + + @param request: ListProductAuthorizationsRequest + @return: ListProductAuthorizationsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_product_authorizations_with_options_async(request, headers, runtime) + + def list_products_with_options( + self, + request: aiwork_space_20210204_models.ListProductsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListProductsResponse: + """ + @summary 列举产品 + + @param request: ListProductsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListProductsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.product_codes): + query['ProductCodes'] = request.product_codes + if not UtilClient.is_unset(request.service_codes): + query['ServiceCodes'] = request.service_codes + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + req = open_api_models.OpenApiRequest( + headers=headers, + 
query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListProducts', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/products', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListProductsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_products_with_options_async( + self, + request: aiwork_space_20210204_models.ListProductsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListProductsResponse: + """ + @summary 列举产品 + + @param request: ListProductsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListProductsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.product_codes): + query['ProductCodes'] = request.product_codes + if not UtilClient.is_unset(request.service_codes): + query['ServiceCodes'] = request.service_codes + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListProducts', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/products', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListProductsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_products( + self, + request: aiwork_space_20210204_models.ListProductsRequest, + ) -> aiwork_space_20210204_models.ListProductsResponse: + """ + @summary 列举产品 + + @param request: ListProductsRequest + @return: ListProductsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_products_with_options(request, headers, 
runtime) + + async def list_products_async( + self, + request: aiwork_space_20210204_models.ListProductsRequest, + ) -> aiwork_space_20210204_models.ListProductsResponse: + """ + @summary 列举产品 + + @param request: ListProductsRequest + @return: ListProductsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_products_with_options_async(request, headers, runtime) + + def list_quotas_with_options( + self, + request: aiwork_space_20210204_models.ListQuotasRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListQuotasResponse: + """ + @summary 获取已有配额列表 + + @param request: ListQuotasRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListQuotasResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.name): + query['Name'] = request.name + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListQuotas', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/quotas', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListQuotasResponse(), + self.call_api(params, req, runtime) + ) + + async def list_quotas_with_options_async( + self, + request: aiwork_space_20210204_models.ListQuotasRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListQuotasResponse: + """ + @summary 获取已有配额列表 + + @param request: ListQuotasRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListQuotasResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.name): + query['Name'] = request.name + req = open_api_models.OpenApiRequest( + 
headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListQuotas', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/quotas', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListQuotasResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_quotas( + self, + request: aiwork_space_20210204_models.ListQuotasRequest, + ) -> aiwork_space_20210204_models.ListQuotasResponse: + """ + @summary 获取已有配额列表 + + @param request: ListQuotasRequest + @return: ListQuotasResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_quotas_with_options(request, headers, runtime) + + async def list_quotas_async( + self, + request: aiwork_space_20210204_models.ListQuotasRequest, + ) -> aiwork_space_20210204_models.ListQuotasResponse: + """ + @summary 获取已有配额列表 + + @param request: ListQuotasRequest + @return: ListQuotasResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_quotas_with_options_async(request, headers, runtime) + + def list_resources_with_options( + self, + request: aiwork_space_20210204_models.ListResourcesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListResourcesResponse: + """ + @summary 列举工作空间资源 + + @param request: ListResourcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListResourcesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.group_name): + query['GroupName'] = request.group_name + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = 
request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.product_types): + query['ProductTypes'] = request.product_types + if not UtilClient.is_unset(request.quota_ids): + query['QuotaIds'] = request.quota_ids + if not UtilClient.is_unset(request.resource_name): + query['ResourceName'] = request.resource_name + if not UtilClient.is_unset(request.resource_types): + query['ResourceTypes'] = request.resource_types + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + if not UtilClient.is_unset(request.verbose_fields): + query['VerboseFields'] = request.verbose_fields + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListResources', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/resources', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListResourcesResponse(), + self.call_api(params, req, runtime) + ) + + async def list_resources_with_options_async( + self, + request: aiwork_space_20210204_models.ListResourcesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListResourcesResponse: + """ + @summary 列举工作空间资源 + + @param request: ListResourcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListResourcesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.group_name): + query['GroupName'] = request.group_name + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if 
not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.product_types): + query['ProductTypes'] = request.product_types + if not UtilClient.is_unset(request.quota_ids): + query['QuotaIds'] = request.quota_ids + if not UtilClient.is_unset(request.resource_name): + query['ResourceName'] = request.resource_name + if not UtilClient.is_unset(request.resource_types): + query['ResourceTypes'] = request.resource_types + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + if not UtilClient.is_unset(request.verbose_fields): + query['VerboseFields'] = request.verbose_fields + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListResources', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/resources', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListResourcesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_resources( + self, + request: aiwork_space_20210204_models.ListResourcesRequest, + ) -> aiwork_space_20210204_models.ListResourcesResponse: + """ + @summary 列举工作空间资源 + + @param request: ListResourcesRequest + @return: ListResourcesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_resources_with_options(request, headers, runtime) + + async def list_resources_async( + self, + request: aiwork_space_20210204_models.ListResourcesRequest, + ) -> aiwork_space_20210204_models.ListResourcesResponse: + """ + @summary 列举工作空间资源 + + @param request: ListResourcesRequest + @return: ListResourcesResponse + """ + runtime = 
util_models.RuntimeOptions() + headers = {} + return await self.list_resources_with_options_async(request, headers, runtime) + + def list_service_templates_with_options( + self, + request: aiwork_space_20210204_models.ListServiceTemplatesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListServiceTemplatesResponse: + """ + @summary 获取服务模版列表 + + @param request: ListServiceTemplatesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListServiceTemplatesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.label): + query['Label'] = request.label + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.provider): + query['Provider'] = request.provider + if not UtilClient.is_unset(request.query): + query['Query'] = request.query + if not UtilClient.is_unset(request.service_template_name): + query['ServiceTemplateName'] = request.service_template_name + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListServiceTemplates', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/servicetemplates', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListServiceTemplatesResponse(), + self.call_api(params, req, runtime) + ) + + async def list_service_templates_with_options_async( + self, + request: aiwork_space_20210204_models.ListServiceTemplatesRequest, + headers: Dict[str, str], + 
runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListServiceTemplatesResponse: + """ + @summary 获取服务模版列表 + + @param request: ListServiceTemplatesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListServiceTemplatesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.label): + query['Label'] = request.label + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.provider): + query['Provider'] = request.provider + if not UtilClient.is_unset(request.query): + query['Query'] = request.query + if not UtilClient.is_unset(request.service_template_name): + query['ServiceTemplateName'] = request.service_template_name + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListServiceTemplates', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/servicetemplates', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListServiceTemplatesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_service_templates( + self, + request: aiwork_space_20210204_models.ListServiceTemplatesRequest, + ) -> aiwork_space_20210204_models.ListServiceTemplatesResponse: + """ + @summary 获取服务模版列表 + + @param request: ListServiceTemplatesRequest + @return: ListServiceTemplatesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_service_templates_with_options(request, headers, runtime) + + 
async def list_service_templates_async( + self, + request: aiwork_space_20210204_models.ListServiceTemplatesRequest, + ) -> aiwork_space_20210204_models.ListServiceTemplatesResponse: + """ + @summary 获取服务模版列表 + + @param request: ListServiceTemplatesRequest + @return: ListServiceTemplatesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_service_templates_with_options_async(request, headers, runtime) + + def list_user_configs_with_options( + self, + request: aiwork_space_20210204_models.ListUserConfigsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListUserConfigsResponse: + """ + @summary 获取用户配置 + + @param request: ListUserConfigsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListUserConfigsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.category_names): + query['CategoryNames'] = request.category_names + if not UtilClient.is_unset(request.config_keys): + query['ConfigKeys'] = request.config_keys + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListUserConfigs', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/userconfigs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListUserConfigsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_user_configs_with_options_async( + self, + request: aiwork_space_20210204_models.ListUserConfigsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListUserConfigsResponse: + """ + @summary 获取用户配置 + + @param request: ListUserConfigsRequest + @param headers: map + @param runtime: runtime options for this request 
RuntimeOptions + @return: ListUserConfigsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.category_names): + query['CategoryNames'] = request.category_names + if not UtilClient.is_unset(request.config_keys): + query['ConfigKeys'] = request.config_keys + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListUserConfigs', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/userconfigs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListUserConfigsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_user_configs( + self, + request: aiwork_space_20210204_models.ListUserConfigsRequest, + ) -> aiwork_space_20210204_models.ListUserConfigsResponse: + """ + @summary 获取用户配置 + + @param request: ListUserConfigsRequest + @return: ListUserConfigsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_user_configs_with_options(request, headers, runtime) + + async def list_user_configs_async( + self, + request: aiwork_space_20210204_models.ListUserConfigsRequest, + ) -> aiwork_space_20210204_models.ListUserConfigsResponse: + """ + @summary 获取用户配置 + + @param request: ListUserConfigsRequest + @return: ListUserConfigsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_user_configs_with_options_async(request, headers, runtime) + + def list_users_with_options( + self, + request: aiwork_space_20210204_models.ListUsersRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListUsersResponse: + """ + @summary 列出用户 + + @param request: ListUsersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListUsersResponse + 
""" + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.account_types): + query['AccountTypes'] = request.account_types + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.user_ids): + query['UserIds'] = request.user_ids + if not UtilClient.is_unset(request.user_name): + query['UserName'] = request.user_name + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListUsers', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/users', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListUsersResponse(), + self.call_api(params, req, runtime) + ) + + async def list_users_with_options_async( + self, + request: aiwork_space_20210204_models.ListUsersRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListUsersResponse: + """ + @summary 列出用户 + + @param request: ListUsersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListUsersResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.account_types): + query['AccountTypes'] = request.account_types + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.user_ids): + query['UserIds'] = request.user_ids + if not UtilClient.is_unset(request.user_name): + query['UserName'] = request.user_name + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = 
open_api_models.Params( + action='ListUsers', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/users', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListUsersResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_users( + self, + request: aiwork_space_20210204_models.ListUsersRequest, + ) -> aiwork_space_20210204_models.ListUsersResponse: + """ + @summary 列出用户 + + @param request: ListUsersRequest + @return: ListUsersResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_users_with_options(request, headers, runtime) + + async def list_users_async( + self, + request: aiwork_space_20210204_models.ListUsersRequest, + ) -> aiwork_space_20210204_models.ListUsersResponse: + """ + @summary 列出用户 + + @param request: ListUsersRequest + @return: ListUsersResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_users_with_options_async(request, headers, runtime) + + def list_workspace_permissions_with_options( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspacePermissionsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListWorkspacePermissionsResponse: + """ + @summary 批量获取权限 + + @param request: ListWorkspacePermissionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListWorkspacePermissionsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.options): + body['Options'] = request.options + if not UtilClient.is_unset(request.permissions): + body['Permissions'] = request.permissions + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='ListWorkspacePermissions', + 
version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions/action/list', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListWorkspacePermissionsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_workspace_permissions_with_options_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspacePermissionsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListWorkspacePermissionsResponse: + """ + @summary 批量获取权限 + + @param request: ListWorkspacePermissionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListWorkspacePermissionsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.options): + body['Options'] = request.options + if not UtilClient.is_unset(request.permissions): + body['Permissions'] = request.permissions + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='ListWorkspacePermissions', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/permissions/action/list', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListWorkspacePermissionsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_workspace_permissions( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspacePermissionsRequest, + ) -> aiwork_space_20210204_models.ListWorkspacePermissionsResponse: + """ + @summary 批量获取权限 + + @param request: ListWorkspacePermissionsRequest + @return: 
ListWorkspacePermissionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_workspace_permissions_with_options(workspace_id, request, headers, runtime) + + async def list_workspace_permissions_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspacePermissionsRequest, + ) -> aiwork_space_20210204_models.ListWorkspacePermissionsResponse: + """ + @summary 批量获取权限 + + @param request: ListWorkspacePermissionsRequest + @return: ListWorkspacePermissionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_workspace_permissions_with_options_async(workspace_id, request, headers, runtime) + + def list_workspace_roles_with_options( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspaceRolesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListWorkspaceRolesResponse: + """ + @summary 列举工作空间角色 + + @param request: ListWorkspaceRolesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListWorkspaceRolesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.role_ids): + query['RoleIds'] = request.role_ids + if not UtilClient.is_unset(request.role_name): + query['RoleName'] = request.role_name + if not UtilClient.is_unset(request.role_type): + query['RoleType'] = request.role_type + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.verbose_fields): + query['VerboseFields'] = 
request.verbose_fields + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListWorkspaceRoles', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/roles', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListWorkspaceRolesResponse(), + self.call_api(params, req, runtime) + ) + + async def list_workspace_roles_with_options_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspaceRolesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListWorkspaceRolesResponse: + """ + @summary 列举工作空间角色 + + @param request: ListWorkspaceRolesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListWorkspaceRolesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.role_ids): + query['RoleIds'] = request.role_ids + if not UtilClient.is_unset(request.role_name): + query['RoleName'] = request.role_name + if not UtilClient.is_unset(request.role_type): + query['RoleType'] = request.role_type + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.verbose_fields): + query['VerboseFields'] = request.verbose_fields + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = 
open_api_models.Params( + action='ListWorkspaceRoles', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/roles', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListWorkspaceRolesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_workspace_roles( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspaceRolesRequest, + ) -> aiwork_space_20210204_models.ListWorkspaceRolesResponse: + """ + @summary 列举工作空间角色 + + @param request: ListWorkspaceRolesRequest + @return: ListWorkspaceRolesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_workspace_roles_with_options(workspace_id, request, headers, runtime) + + async def list_workspace_roles_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspaceRolesRequest, + ) -> aiwork_space_20210204_models.ListWorkspaceRolesResponse: + """ + @summary 列举工作空间角色 + + @param request: ListWorkspaceRolesRequest + @return: ListWorkspaceRolesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_workspace_roles_with_options_async(workspace_id, request, headers, runtime) + + def list_workspace_users_with_options( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspaceUsersRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListWorkspaceUsersResponse: + """ + @summary 列出工作空间的可变为成员的用户 + + @param request: ListWorkspaceUsersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListWorkspaceUsersResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.user_name): + query['UserName'] = request.user_name + req = 
open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListWorkspaceUsers', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/users', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListWorkspaceUsersResponse(), + self.call_api(params, req, runtime) + ) + + async def list_workspace_users_with_options_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspaceUsersRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListWorkspaceUsersResponse: + """ + @summary 列出工作空间的可变为成员的用户 + + @param request: ListWorkspaceUsersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListWorkspaceUsersResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.user_name): + query['UserName'] = request.user_name + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListWorkspaceUsers', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/users', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListWorkspaceUsersResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_workspace_users( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspaceUsersRequest, + ) -> aiwork_space_20210204_models.ListWorkspaceUsersResponse: + """ + @summary 列出工作空间的可变为成员的用户 + + @param request: ListWorkspaceUsersRequest + @return: ListWorkspaceUsersResponse 
+ """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_workspace_users_with_options(workspace_id, request, headers, runtime) + + async def list_workspace_users_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.ListWorkspaceUsersRequest, + ) -> aiwork_space_20210204_models.ListWorkspaceUsersResponse: + """ + @summary 列出工作空间的可变为成员的用户 + + @param request: ListWorkspaceUsersRequest + @return: ListWorkspaceUsersResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_workspace_users_with_options_async(workspace_id, request, headers, runtime) + + def list_workspaces_with_options( + self, + request: aiwork_space_20210204_models.ListWorkspacesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListWorkspacesResponse: + """ + @summary 获得工作空间列表 + + @param request: ListWorkspacesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListWorkspacesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.fields): + query['Fields'] = request.fields + if not UtilClient.is_unset(request.module_list): + query['ModuleList'] = request.module_list + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + if not 
UtilClient.is_unset(request.workspace_name): + query['WorkspaceName'] = request.workspace_name + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListWorkspaces', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListWorkspacesResponse(), + self.call_api(params, req, runtime) + ) + + async def list_workspaces_with_options_async( + self, + request: aiwork_space_20210204_models.ListWorkspacesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.ListWorkspacesResponse: + """ + @summary 获得工作空间列表 + + @param request: ListWorkspacesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListWorkspacesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.fields): + query['Fields'] = request.fields + if not UtilClient.is_unset(request.module_list): + query['ModuleList'] = request.module_list + if not UtilClient.is_unset(request.option): + query['Option'] = request.option + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + if not UtilClient.is_unset(request.workspace_name): + query['WorkspaceName'] 
= request.workspace_name + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListWorkspaces', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.ListWorkspacesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_workspaces( + self, + request: aiwork_space_20210204_models.ListWorkspacesRequest, + ) -> aiwork_space_20210204_models.ListWorkspacesResponse: + """ + @summary 获得工作空间列表 + + @param request: ListWorkspacesRequest + @return: ListWorkspacesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_workspaces_with_options(request, headers, runtime) + + async def list_workspaces_async( + self, + request: aiwork_space_20210204_models.ListWorkspacesRequest, + ) -> aiwork_space_20210204_models.ListWorkspacesResponse: + """ + @summary 获得工作空间列表 + + @param request: ListWorkspacesRequest + @return: ListWorkspacesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_workspaces_with_options_async(request, headers, runtime) + + def migrate_datasets_with_options( + self, + request: aiwork_space_20210204_models.MigrateDatasetsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.MigrateDatasetsResponse: + """ + @summary 迁移数据集 + + @param request: MigrateDatasetsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: MigrateDatasetsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.count): + body['Count'] = request.count + if not UtilClient.is_unset(request.dataset_id): + body['DatasetId'] = request.dataset_id + if not 
UtilClient.is_unset(request.if_force): + body['IfForce'] = request.if_force + if not UtilClient.is_unset(request.owner_id): + body['OwnerId'] = request.owner_id + if not UtilClient.is_unset(request.workspace_id): + body['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='MigrateDatasets', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/datasets/migrate', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.MigrateDatasetsResponse(), + self.call_api(params, req, runtime) + ) + + async def migrate_datasets_with_options_async( + self, + request: aiwork_space_20210204_models.MigrateDatasetsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.MigrateDatasetsResponse: + """ + @summary 迁移数据集 + + @param request: MigrateDatasetsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: MigrateDatasetsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.count): + body['Count'] = request.count + if not UtilClient.is_unset(request.dataset_id): + body['DatasetId'] = request.dataset_id + if not UtilClient.is_unset(request.if_force): + body['IfForce'] = request.if_force + if not UtilClient.is_unset(request.owner_id): + body['OwnerId'] = request.owner_id + if not UtilClient.is_unset(request.workspace_id): + body['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='MigrateDatasets', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/datasets/migrate', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + 
body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.MigrateDatasetsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def migrate_datasets( + self, + request: aiwork_space_20210204_models.MigrateDatasetsRequest, + ) -> aiwork_space_20210204_models.MigrateDatasetsResponse: + """ + @summary 迁移数据集 + + @param request: MigrateDatasetsRequest + @return: MigrateDatasetsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.migrate_datasets_with_options(request, headers, runtime) + + async def migrate_datasets_async( + self, + request: aiwork_space_20210204_models.MigrateDatasetsRequest, + ) -> aiwork_space_20210204_models.MigrateDatasetsResponse: + """ + @summary 迁移数据集 + + @param request: MigrateDatasetsRequest + @return: MigrateDatasetsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.migrate_datasets_with_options_async(request, headers, runtime) + + def publish_code_source_with_options( + self, + code_source_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.PublishCodeSourceResponse: + """ + @summary 发布一个代码源配置为本工作空间下所有人可见 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: PublishCodeSourceResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='PublishCodeSource', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/codesources/{OpenApiUtilClient.get_encode_param(code_source_id)}/publish', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.PublishCodeSourceResponse(), + self.call_api(params, req, runtime) + ) + + async def publish_code_source_with_options_async( + self, + code_source_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> 
aiwork_space_20210204_models.PublishCodeSourceResponse: + """ + @summary 发布一个代码源配置为本工作空间下所有人可见 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: PublishCodeSourceResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='PublishCodeSource', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/codesources/{OpenApiUtilClient.get_encode_param(code_source_id)}/publish', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.PublishCodeSourceResponse(), + await self.call_api_async(params, req, runtime) + ) + + def publish_code_source( + self, + code_source_id: str, + ) -> aiwork_space_20210204_models.PublishCodeSourceResponse: + """ + @summary 发布一个代码源配置为本工作空间下所有人可见 + + @return: PublishCodeSourceResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.publish_code_source_with_options(code_source_id, headers, runtime) + + async def publish_code_source_async( + self, + code_source_id: str, + ) -> aiwork_space_20210204_models.PublishCodeSourceResponse: + """ + @summary 发布一个代码源配置为本工作空间下所有人可见 + + @return: PublishCodeSourceResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.publish_code_source_with_options_async(code_source_id, headers, runtime) + + def publish_dataset_with_options( + self, + dataset_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.PublishDatasetResponse: + """ + @summary 更新数据集 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: PublishDatasetResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='PublishDataset', + version='2021-02-04', + protocol='HTTPS', + 
pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/publish', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.PublishDatasetResponse(), + self.call_api(params, req, runtime) + ) + + async def publish_dataset_with_options_async( + self, + dataset_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.PublishDatasetResponse: + """ + @summary 更新数据集 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: PublishDatasetResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='PublishDataset', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/publish', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.PublishDatasetResponse(), + await self.call_api_async(params, req, runtime) + ) + + def publish_dataset( + self, + dataset_id: str, + ) -> aiwork_space_20210204_models.PublishDatasetResponse: + """ + @summary 更新数据集 + + @return: PublishDatasetResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.publish_dataset_with_options(dataset_id, headers, runtime) + + async def publish_dataset_async( + self, + dataset_id: str, + ) -> aiwork_space_20210204_models.PublishDatasetResponse: + """ + @summary 更新数据集 + + @return: PublishDatasetResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.publish_dataset_with_options_async(dataset_id, headers, runtime) + + def publish_image_with_options( + self, + image_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.PublishImageResponse: + """ + @summary 发布 
Image + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: PublishImageResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='PublishImage', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}/publish', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.PublishImageResponse(), + self.call_api(params, req, runtime) + ) + + async def publish_image_with_options_async( + self, + image_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.PublishImageResponse: + """ + @summary 发布 Image + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: PublishImageResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers ) params = open_api_models.Params( - action='ListWorkspaces', + action='PublishImage', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces', - method='GET', + pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}/publish', + method='PUT', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.ListWorkspacesResponse(), + aiwork_space_20210204_models.PublishImageResponse(), await self.call_api_async(params, req, runtime) ) - def list_workspaces( + def publish_image( self, - request: aiwork_space_20210204_models.ListWorkspacesRequest, - ) -> aiwork_space_20210204_models.ListWorkspacesResponse: + image_id: str, + ) -> aiwork_space_20210204_models.PublishImageResponse: + """ + @summary 发布 Image + + @return: PublishImageResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_workspaces_with_options(request, headers, runtime) + return 
self.publish_image_with_options(image_id, headers, runtime) - async def list_workspaces_async( + async def publish_image_async( self, - request: aiwork_space_20210204_models.ListWorkspacesRequest, - ) -> aiwork_space_20210204_models.ListWorkspacesResponse: + image_id: str, + ) -> aiwork_space_20210204_models.PublishImageResponse: + """ + @summary 发布 Image + + @return: PublishImageResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_workspaces_with_options_async(request, headers, runtime) + return await self.publish_image_with_options_async(image_id, headers, runtime) - def migrate_datasets_with_options( + def register_lineage_with_options( self, - request: aiwork_space_20210204_models.MigrateDatasetsRequest, + request: aiwork_space_20210204_models.RegisterLineageRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.MigrateDatasetsResponse: + ) -> aiwork_space_20210204_models.RegisterLineageResponse: + """ + @summary 创建血缘 + + @param request: RegisterLineageRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RegisterLineageResponse + """ UtilClient.validate_model(request) body = {} - if not UtilClient.is_unset(request.count): - body['Count'] = request.count - if not UtilClient.is_unset(request.dataset_id): - body['DatasetId'] = request.dataset_id - if not UtilClient.is_unset(request.if_force): - body['IfForce'] = request.if_force - if not UtilClient.is_unset(request.owner_id): - body['OwnerId'] = request.owner_id - if not UtilClient.is_unset(request.workspace_id): - body['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.attributes): + body['Attributes'] = request.attributes + if not UtilClient.is_unset(request.input_entities): + body['InputEntities'] = request.input_entities + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + if not 
UtilClient.is_unset(request.output_entities): + body['OutputEntities'] = request.output_entities + if not UtilClient.is_unset(request.qualified_name): + body['QualifiedName'] = request.qualified_name + if not UtilClient.is_unset(request.register_task_as_entity): + body['RegisterTaskAsEntity'] = request.register_task_as_entity req = open_api_models.OpenApiRequest( headers=headers, body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='MigrateDatasets', + action='RegisterLineage', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets/migrate', + pathname=f'/api/v1/lineages', method='POST', auth_type='AK', style='ROA', @@ -5810,37 +12144,47 @@ def migrate_datasets_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.MigrateDatasetsResponse(), + aiwork_space_20210204_models.RegisterLineageResponse(), self.call_api(params, req, runtime) ) - async def migrate_datasets_with_options_async( + async def register_lineage_with_options_async( self, - request: aiwork_space_20210204_models.MigrateDatasetsRequest, + request: aiwork_space_20210204_models.RegisterLineageRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.MigrateDatasetsResponse: + ) -> aiwork_space_20210204_models.RegisterLineageResponse: + """ + @summary 创建血缘 + + @param request: RegisterLineageRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RegisterLineageResponse + """ UtilClient.validate_model(request) body = {} - if not UtilClient.is_unset(request.count): - body['Count'] = request.count - if not UtilClient.is_unset(request.dataset_id): - body['DatasetId'] = request.dataset_id - if not UtilClient.is_unset(request.if_force): - body['IfForce'] = request.if_force - if not UtilClient.is_unset(request.owner_id): - body['OwnerId'] = request.owner_id - if not UtilClient.is_unset(request.workspace_id): - body['WorkspaceId'] = 
request.workspace_id + if not UtilClient.is_unset(request.attributes): + body['Attributes'] = request.attributes + if not UtilClient.is_unset(request.input_entities): + body['InputEntities'] = request.input_entities + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + if not UtilClient.is_unset(request.output_entities): + body['OutputEntities'] = request.output_entities + if not UtilClient.is_unset(request.qualified_name): + body['QualifiedName'] = request.qualified_name + if not UtilClient.is_unset(request.register_task_as_entity): + body['RegisterTaskAsEntity'] = request.register_task_as_entity req = open_api_models.OpenApiRequest( headers=headers, body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='MigrateDatasets', + action='RegisterLineage', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets/migrate', + pathname=f'/api/v1/lineages', method='POST', auth_type='AK', style='ROA', @@ -5848,238 +12192,342 @@ async def migrate_datasets_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.MigrateDatasetsResponse(), + aiwork_space_20210204_models.RegisterLineageResponse(), await self.call_api_async(params, req, runtime) ) - def migrate_datasets( + def register_lineage( self, - request: aiwork_space_20210204_models.MigrateDatasetsRequest, - ) -> aiwork_space_20210204_models.MigrateDatasetsResponse: + request: aiwork_space_20210204_models.RegisterLineageRequest, + ) -> aiwork_space_20210204_models.RegisterLineageResponse: + """ + @summary 创建血缘 + + @param request: RegisterLineageRequest + @return: RegisterLineageResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.migrate_datasets_with_options(request, headers, runtime) + return self.register_lineage_with_options(request, headers, runtime) - async def migrate_datasets_async( + async def register_lineage_async( self, - request: 
aiwork_space_20210204_models.MigrateDatasetsRequest, - ) -> aiwork_space_20210204_models.MigrateDatasetsResponse: + request: aiwork_space_20210204_models.RegisterLineageRequest, + ) -> aiwork_space_20210204_models.RegisterLineageResponse: + """ + @summary 创建血缘 + + @param request: RegisterLineageRequest + @return: RegisterLineageResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.migrate_datasets_with_options_async(request, headers, runtime) + return await self.register_lineage_with_options_async(request, headers, runtime) - def publish_code_source_with_options( + def remove_image_with_options( self, - code_source_id: str, + image_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.PublishCodeSourceResponse: + ) -> aiwork_space_20210204_models.RemoveImageResponse: + """ + @summary 删除 Image + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RemoveImageResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='PublishCodeSource', + action='RemoveImage', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/codesources/{OpenApiUtilClient.get_encode_param(code_source_id)}/publish', - method='PUT', + pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.PublishCodeSourceResponse(), + aiwork_space_20210204_models.RemoveImageResponse(), self.call_api(params, req, runtime) ) - async def publish_code_source_with_options_async( + async def remove_image_with_options_async( self, - code_source_id: str, + image_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.PublishCodeSourceResponse: + ) -> aiwork_space_20210204_models.RemoveImageResponse: + """ + @summary 删除 
Image + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RemoveImageResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='PublishCodeSource', + action='RemoveImage', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/codesources/{OpenApiUtilClient.get_encode_param(code_source_id)}/publish', - method='PUT', + pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.PublishCodeSourceResponse(), + aiwork_space_20210204_models.RemoveImageResponse(), await self.call_api_async(params, req, runtime) ) - def publish_code_source( + def remove_image( self, - code_source_id: str, - ) -> aiwork_space_20210204_models.PublishCodeSourceResponse: + image_id: str, + ) -> aiwork_space_20210204_models.RemoveImageResponse: + """ + @summary 删除 Image + + @return: RemoveImageResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.publish_code_source_with_options(code_source_id, headers, runtime) + return self.remove_image_with_options(image_id, headers, runtime) - async def publish_code_source_async( + async def remove_image_async( self, - code_source_id: str, - ) -> aiwork_space_20210204_models.PublishCodeSourceResponse: + image_id: str, + ) -> aiwork_space_20210204_models.RemoveImageResponse: + """ + @summary 删除 Image + + @return: RemoveImageResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.publish_code_source_with_options_async(code_source_id, headers, runtime) + return await self.remove_image_with_options_async(image_id, headers, runtime) - def publish_dataset_with_options( + def remove_image_labels_with_options( self, - dataset_id: str, + image_id: str, + label_key: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> 
aiwork_space_20210204_models.PublishDatasetResponse: + ) -> aiwork_space_20210204_models.RemoveImageLabelsResponse: + """ + @summary 删除 Image 的标签 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RemoveImageLabelsResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='PublishDataset', + action='RemoveImageLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/publish', - method='PUT', + pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}/labels/{OpenApiUtilClient.get_encode_param(label_key)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.PublishDatasetResponse(), + aiwork_space_20210204_models.RemoveImageLabelsResponse(), self.call_api(params, req, runtime) ) - async def publish_dataset_with_options_async( + async def remove_image_labels_with_options_async( self, - dataset_id: str, + image_id: str, + label_key: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.PublishDatasetResponse: + ) -> aiwork_space_20210204_models.RemoveImageLabelsResponse: + """ + @summary 删除 Image 的标签 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RemoveImageLabelsResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='PublishDataset', + action='RemoveImageLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/publish', - method='PUT', + pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}/labels/{OpenApiUtilClient.get_encode_param(label_key)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return 
TeaCore.from_map( - aiwork_space_20210204_models.PublishDatasetResponse(), + aiwork_space_20210204_models.RemoveImageLabelsResponse(), await self.call_api_async(params, req, runtime) ) - def publish_dataset( + def remove_image_labels( self, - dataset_id: str, - ) -> aiwork_space_20210204_models.PublishDatasetResponse: + image_id: str, + label_key: str, + ) -> aiwork_space_20210204_models.RemoveImageLabelsResponse: + """ + @summary 删除 Image 的标签 + + @return: RemoveImageLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.publish_dataset_with_options(dataset_id, headers, runtime) + return self.remove_image_labels_with_options(image_id, label_key, headers, runtime) - async def publish_dataset_async( + async def remove_image_labels_async( self, - dataset_id: str, - ) -> aiwork_space_20210204_models.PublishDatasetResponse: + image_id: str, + label_key: str, + ) -> aiwork_space_20210204_models.RemoveImageLabelsResponse: + """ + @summary 删除 Image 的标签 + + @return: RemoveImageLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.publish_dataset_with_options_async(dataset_id, headers, runtime) + return await self.remove_image_labels_with_options_async(image_id, label_key, headers, runtime) - def publish_image_with_options( + def remove_member_role_with_options( self, - image_id: str, + workspace_id: str, + member_id: str, + role_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.PublishImageResponse: + ) -> aiwork_space_20210204_models.RemoveMemberRoleResponse: + """ + @summary 删除成员角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RemoveMemberRoleResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='PublishImage', + action='RemoveMemberRole', version='2021-02-04', protocol='HTTPS', - 
pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}/publish', - method='PUT', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members/{OpenApiUtilClient.get_encode_param(member_id)}/roles/{OpenApiUtilClient.get_encode_param(role_name)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.PublishImageResponse(), + aiwork_space_20210204_models.RemoveMemberRoleResponse(), self.call_api(params, req, runtime) ) - async def publish_image_with_options_async( + async def remove_member_role_with_options_async( self, - image_id: str, + workspace_id: str, + member_id: str, + role_name: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.PublishImageResponse: + ) -> aiwork_space_20210204_models.RemoveMemberRoleResponse: + """ + @summary 删除成员角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RemoveMemberRoleResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='PublishImage', + action='RemoveMemberRole', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}/publish', - method='PUT', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members/{OpenApiUtilClient.get_encode_param(member_id)}/roles/{OpenApiUtilClient.get_encode_param(role_name)}', + method='DELETE', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.PublishImageResponse(), + aiwork_space_20210204_models.RemoveMemberRoleResponse(), await self.call_api_async(params, req, runtime) ) - def publish_image( + def remove_member_role( self, - image_id: str, - ) -> aiwork_space_20210204_models.PublishImageResponse: + workspace_id: str, + member_id: str, 
+ role_name: str, + ) -> aiwork_space_20210204_models.RemoveMemberRoleResponse: + """ + @summary 删除成员角色 + + @return: RemoveMemberRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.publish_image_with_options(image_id, headers, runtime) + return self.remove_member_role_with_options(workspace_id, member_id, role_name, headers, runtime) - async def publish_image_async( + async def remove_member_role_async( self, - image_id: str, - ) -> aiwork_space_20210204_models.PublishImageResponse: + workspace_id: str, + member_id: str, + role_name: str, + ) -> aiwork_space_20210204_models.RemoveMemberRoleResponse: + """ + @summary 删除成员角色 + + @return: RemoveMemberRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.publish_image_with_options_async(image_id, headers, runtime) + return await self.remove_member_role_with_options_async(workspace_id, member_id, role_name, headers, runtime) - def remove_image_with_options( + def remove_workspace_quota_with_options( self, - image_id: str, + workspace_id: str, + quota_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.RemoveImageResponse: + ) -> aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse: + """ + @summary 移除资源实例配额 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RemoveWorkspaceQuotaResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='RemoveImage', + action='RemoveWorkspaceQuota', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}', method='DELETE', auth_type='AK', style='ROA', @@ -6087,24 +12535,32 @@ def remove_image_with_options( body_type='json' ) return TeaCore.from_map( - 
aiwork_space_20210204_models.RemoveImageResponse(), + aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse(), self.call_api(params, req, runtime) ) - async def remove_image_with_options_async( + async def remove_workspace_quota_with_options_async( self, - image_id: str, + workspace_id: str, + quota_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.RemoveImageResponse: + ) -> aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse: + """ + @summary 移除资源实例配额 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: RemoveWorkspaceQuotaResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='RemoveImage', + action='RemoveWorkspaceQuota', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}', method='DELETE', auth_type='AK', style='ROA', @@ -6112,245 +12568,370 @@ async def remove_image_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.RemoveImageResponse(), + aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse(), await self.call_api_async(params, req, runtime) ) - def remove_image( + def remove_workspace_quota( self, - image_id: str, - ) -> aiwork_space_20210204_models.RemoveImageResponse: + workspace_id: str, + quota_id: str, + ) -> aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse: + """ + @summary 移除资源实例配额 + + @return: RemoveWorkspaceQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.remove_image_with_options(image_id, headers, runtime) + return self.remove_workspace_quota_with_options(workspace_id, quota_id, headers, runtime) - async def remove_image_async( + async def remove_workspace_quota_async( self, - 
image_id: str, - ) -> aiwork_space_20210204_models.RemoveImageResponse: + workspace_id: str, + quota_id: str, + ) -> aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse: + """ + @summary 移除资源实例配额 + + @return: RemoveWorkspaceQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.remove_image_with_options_async(image_id, headers, runtime) + return await self.remove_workspace_quota_with_options_async(workspace_id, quota_id, headers, runtime) - def remove_image_labels_with_options( + def set_experiment_labels_with_options( self, - image_id: str, - label_keys: str, + experiment_id: str, + request: aiwork_space_20210204_models.SetExperimentLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.RemoveImageLabelsResponse: + ) -> aiwork_space_20210204_models.SetExperimentLabelsResponse: + """ + @summary 更新实验标签 + + @param request: SetExperimentLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: SetExperimentLabelsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='RemoveImageLabels', + action='SetExperimentLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}/labels/{OpenApiUtilClient.get_encode_param(label_keys)}', - method='DELETE', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}/labels', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.RemoveImageLabelsResponse(), + aiwork_space_20210204_models.SetExperimentLabelsResponse(), self.call_api(params, req, runtime) ) - 
async def remove_image_labels_with_options_async( + async def set_experiment_labels_with_options_async( self, - image_id: str, - label_keys: str, + experiment_id: str, + request: aiwork_space_20210204_models.SetExperimentLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.RemoveImageLabelsResponse: + ) -> aiwork_space_20210204_models.SetExperimentLabelsResponse: + """ + @summary 更新实验标签 + + @param request: SetExperimentLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: SetExperimentLabelsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='RemoveImageLabels', + action='SetExperimentLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/images/{OpenApiUtilClient.get_encode_param(image_id)}/labels/{OpenApiUtilClient.get_encode_param(label_keys)}', - method='DELETE', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}/labels', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.RemoveImageLabelsResponse(), + aiwork_space_20210204_models.SetExperimentLabelsResponse(), await self.call_api_async(params, req, runtime) ) - def remove_image_labels( + def set_experiment_labels( self, - image_id: str, - label_keys: str, - ) -> aiwork_space_20210204_models.RemoveImageLabelsResponse: + experiment_id: str, + request: aiwork_space_20210204_models.SetExperimentLabelsRequest, + ) -> aiwork_space_20210204_models.SetExperimentLabelsResponse: + """ + @summary 更新实验标签 + + @param request: SetExperimentLabelsRequest + @return: SetExperimentLabelsResponse + """ runtime = 
util_models.RuntimeOptions() headers = {} - return self.remove_image_labels_with_options(image_id, label_keys, headers, runtime) + return self.set_experiment_labels_with_options(experiment_id, request, headers, runtime) - async def remove_image_labels_async( + async def set_experiment_labels_async( self, - image_id: str, - label_keys: str, - ) -> aiwork_space_20210204_models.RemoveImageLabelsResponse: + experiment_id: str, + request: aiwork_space_20210204_models.SetExperimentLabelsRequest, + ) -> aiwork_space_20210204_models.SetExperimentLabelsResponse: + """ + @summary 更新实验标签 + + @param request: SetExperimentLabelsRequest + @return: SetExperimentLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.remove_image_labels_with_options_async(image_id, label_keys, headers, runtime) + return await self.set_experiment_labels_with_options_async(experiment_id, request, headers, runtime) - def remove_member_role_with_options( + def set_trial_labels_with_options( self, - workspace_id: str, - member_id: str, - role_name: str, + trial_id: str, + request: aiwork_space_20210204_models.SetTrialLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.RemoveMemberRoleResponse: + ) -> aiwork_space_20210204_models.SetTrialLabelsResponse: + """ + @summary 更新Trial标签 + + @param request: SetTrialLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: SetTrialLabelsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='RemoveMemberRole', + action='SetTrialLabels', version='2021-02-04', protocol='HTTPS', - 
pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members/{OpenApiUtilClient.get_encode_param(member_id)}/roles/{OpenApiUtilClient.get_encode_param(role_name)}', - method='DELETE', + pathname=f'/api/v1/trials/{OpenApiUtilClient.get_encode_param(trial_id)}/Labels', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.RemoveMemberRoleResponse(), + aiwork_space_20210204_models.SetTrialLabelsResponse(), self.call_api(params, req, runtime) ) - async def remove_member_role_with_options_async( + async def set_trial_labels_with_options_async( self, - workspace_id: str, - member_id: str, - role_name: str, + trial_id: str, + request: aiwork_space_20210204_models.SetTrialLabelsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.RemoveMemberRoleResponse: + ) -> aiwork_space_20210204_models.SetTrialLabelsResponse: + """ + @summary 更新Trial标签 + + @param request: SetTrialLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: SetTrialLabelsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='RemoveMemberRole', + action='SetTrialLabels', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/members/{OpenApiUtilClient.get_encode_param(member_id)}/roles/{OpenApiUtilClient.get_encode_param(role_name)}', - method='DELETE', + pathname=f'/api/v1/trials/{OpenApiUtilClient.get_encode_param(trial_id)}/Labels', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - 
aiwork_space_20210204_models.RemoveMemberRoleResponse(), + aiwork_space_20210204_models.SetTrialLabelsResponse(), await self.call_api_async(params, req, runtime) ) - def remove_member_role( + def set_trial_labels( self, - workspace_id: str, - member_id: str, - role_name: str, - ) -> aiwork_space_20210204_models.RemoveMemberRoleResponse: + trial_id: str, + request: aiwork_space_20210204_models.SetTrialLabelsRequest, + ) -> aiwork_space_20210204_models.SetTrialLabelsResponse: + """ + @summary 更新Trial标签 + + @param request: SetTrialLabelsRequest + @return: SetTrialLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.remove_member_role_with_options(workspace_id, member_id, role_name, headers, runtime) + return self.set_trial_labels_with_options(trial_id, request, headers, runtime) - async def remove_member_role_async( + async def set_trial_labels_async( self, - workspace_id: str, - member_id: str, - role_name: str, - ) -> aiwork_space_20210204_models.RemoveMemberRoleResponse: + trial_id: str, + request: aiwork_space_20210204_models.SetTrialLabelsRequest, + ) -> aiwork_space_20210204_models.SetTrialLabelsResponse: + """ + @summary 更新Trial标签 + + @param request: SetTrialLabelsRequest + @return: SetTrialLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.remove_member_role_with_options_async(workspace_id, member_id, role_name, headers, runtime) + return await self.set_trial_labels_with_options_async(trial_id, request, headers, runtime) - def remove_workspace_quota_with_options( + def set_user_configs_with_options( self, - workspace_id: str, - quota_id: str, + request: aiwork_space_20210204_models.SetUserConfigsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse: + ) -> aiwork_space_20210204_models.SetUserConfigsResponse: + """ + @summary 更新用户配置 + + @param request: SetUserConfigsRequest + @param headers: map + @param 
runtime: runtime options for this request RuntimeOptions + @return: SetUserConfigsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.configs): + body['Configs'] = request.configs req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='RemoveWorkspaceQuota', + action='SetUserConfigs', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}', - method='DELETE', + pathname=f'/api/v1/userconfigs', + method='PUT', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse(), + aiwork_space_20210204_models.SetUserConfigsResponse(), self.call_api(params, req, runtime) ) - async def remove_workspace_quota_with_options_async( + async def set_user_configs_with_options_async( self, - workspace_id: str, - quota_id: str, + request: aiwork_space_20210204_models.SetUserConfigsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse: + ) -> aiwork_space_20210204_models.SetUserConfigsResponse: + """ + @summary 更新用户配置 + + @param request: SetUserConfigsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: SetUserConfigsResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.configs): + body['Configs'] = request.configs req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='RemoveWorkspaceQuota', + action='SetUserConfigs', version='2021-02-04', protocol='HTTPS', - 
pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}', - method='DELETE', + pathname=f'/api/v1/userconfigs', + method='PUT', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse(), + aiwork_space_20210204_models.SetUserConfigsResponse(), await self.call_api_async(params, req, runtime) ) - def remove_workspace_quota( + def set_user_configs( self, - workspace_id: str, - quota_id: str, - ) -> aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse: + request: aiwork_space_20210204_models.SetUserConfigsRequest, + ) -> aiwork_space_20210204_models.SetUserConfigsResponse: + """ + @summary 更新用户配置 + + @param request: SetUserConfigsRequest + @return: SetUserConfigsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.remove_workspace_quota_with_options(workspace_id, quota_id, headers, runtime) + return self.set_user_configs_with_options(request, headers, runtime) - async def remove_workspace_quota_async( + async def set_user_configs_async( self, - workspace_id: str, - quota_id: str, - ) -> aiwork_space_20210204_models.RemoveWorkspaceQuotaResponse: + request: aiwork_space_20210204_models.SetUserConfigsRequest, + ) -> aiwork_space_20210204_models.SetUserConfigsResponse: + """ + @summary 更新用户配置 + + @param request: SetUserConfigsRequest + @return: SetUserConfigsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.remove_workspace_quota_with_options_async(workspace_id, quota_id, headers, runtime) + return await self.set_user_configs_with_options_async(request, headers, runtime) def sync_users_with_options( self, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.SyncUsersResponse: + """ + @summary 同步用户信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions 
+ @return: SyncUsersResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -6375,6 +12956,13 @@ async def sync_users_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.SyncUsersResponse: + """ + @summary 同步用户信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: SyncUsersResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -6395,11 +12983,21 @@ async def sync_users_with_options_async( ) def sync_users(self) -> aiwork_space_20210204_models.SyncUsersResponse: + """ + @summary 同步用户信息 + + @return: SyncUsersResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.sync_users_with_options(headers, runtime) async def sync_users_async(self) -> aiwork_space_20210204_models.SyncUsersResponse: + """ + @summary 同步用户信息 + + @return: SyncUsersResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.sync_users_with_options_async(headers, runtime) @@ -6411,6 +13009,14 @@ def update_configs_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateConfigsResponse: + """ + @summary 更新配置 + + @param request: UpdateConfigsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateConfigsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.configs): @@ -6442,6 +13048,14 @@ async def update_configs_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateConfigsResponse: + """ + @summary 更新配置 + + @param request: UpdateConfigsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateConfigsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.configs): @@ -6468,35 +13082,174 @@ 
async def update_configs_with_options_async( def update_configs( self, - workspace_id: str, - request: aiwork_space_20210204_models.UpdateConfigsRequest, - ) -> aiwork_space_20210204_models.UpdateConfigsResponse: + workspace_id: str, + request: aiwork_space_20210204_models.UpdateConfigsRequest, + ) -> aiwork_space_20210204_models.UpdateConfigsResponse: + """ + @summary 更新配置 + + @param request: UpdateConfigsRequest + @return: UpdateConfigsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.update_configs_with_options(workspace_id, request, headers, runtime) + + async def update_configs_async( + self, + workspace_id: str, + request: aiwork_space_20210204_models.UpdateConfigsRequest, + ) -> aiwork_space_20210204_models.UpdateConfigsResponse: + """ + @summary 更新配置 + + @param request: UpdateConfigsRequest + @return: UpdateConfigsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.update_configs_with_options_async(workspace_id, request, headers, runtime) + + def update_dataset_with_options( + self, + dataset_id: str, + request: aiwork_space_20210204_models.UpdateDatasetRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.UpdateDatasetResponse: + """ + @summary 更新数据集 + + @param request: UpdateDatasetRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateDatasetResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.description): + body['Description'] = request.description + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + if not UtilClient.is_unset(request.options): + body['Options'] = request.options + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateDataset', + version='2021-02-04', + protocol='HTTPS', + 
pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.UpdateDatasetResponse(), + self.call_api(params, req, runtime) + ) + + async def update_dataset_with_options_async( + self, + dataset_id: str, + request: aiwork_space_20210204_models.UpdateDatasetRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.UpdateDatasetResponse: + """ + @summary 更新数据集 + + @param request: UpdateDatasetRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateDatasetResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.description): + body['Description'] = request.description + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + if not UtilClient.is_unset(request.options): + body['Options'] = request.options + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateDataset', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.UpdateDatasetResponse(), + await self.call_api_async(params, req, runtime) + ) + + def update_dataset( + self, + dataset_id: str, + request: aiwork_space_20210204_models.UpdateDatasetRequest, + ) -> aiwork_space_20210204_models.UpdateDatasetResponse: + """ + @summary 更新数据集 + + @param request: UpdateDatasetRequest + @return: UpdateDatasetResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.update_configs_with_options(workspace_id, request, headers, runtime) + return 
self.update_dataset_with_options(dataset_id, request, headers, runtime) - async def update_configs_async( + async def update_dataset_async( self, - workspace_id: str, - request: aiwork_space_20210204_models.UpdateConfigsRequest, - ) -> aiwork_space_20210204_models.UpdateConfigsResponse: + dataset_id: str, + request: aiwork_space_20210204_models.UpdateDatasetRequest, + ) -> aiwork_space_20210204_models.UpdateDatasetResponse: + """ + @summary 更新数据集 + + @param request: UpdateDatasetRequest + @return: UpdateDatasetResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.update_configs_with_options_async(workspace_id, request, headers, runtime) + return await self.update_dataset_with_options_async(dataset_id, request, headers, runtime) - def update_dataset_with_options( + def update_dataset_version_with_options( self, dataset_id: str, - request: aiwork_space_20210204_models.UpdateDatasetRequest, + version_name: str, + request: aiwork_space_20210204_models.UpdateDatasetVersionRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.UpdateDatasetResponse: + ) -> aiwork_space_20210204_models.UpdateDatasetVersionResponse: + """ + @summary 更新指定版本的数据集信息 + + @param request: UpdateDatasetVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateDatasetVersionResponse + """ UtilClient.validate_model(request) body = {} + if not UtilClient.is_unset(request.data_count): + body['DataCount'] = request.data_count + if not UtilClient.is_unset(request.data_size): + body['DataSize'] = request.data_size if not UtilClient.is_unset(request.description): body['Description'] = request.description - if not UtilClient.is_unset(request.name): - body['Name'] = request.name if not UtilClient.is_unset(request.options): body['Options'] = request.options req = open_api_models.OpenApiRequest( @@ -6504,10 +13257,10 @@ def update_dataset_with_options( 
body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='UpdateDataset', + action='UpdateDatasetVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', method='PUT', auth_type='AK', style='ROA', @@ -6515,23 +13268,34 @@ def update_dataset_with_options( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.UpdateDatasetResponse(), + aiwork_space_20210204_models.UpdateDatasetVersionResponse(), self.call_api(params, req, runtime) ) - async def update_dataset_with_options_async( + async def update_dataset_version_with_options_async( self, dataset_id: str, - request: aiwork_space_20210204_models.UpdateDatasetRequest, + version_name: str, + request: aiwork_space_20210204_models.UpdateDatasetVersionRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> aiwork_space_20210204_models.UpdateDatasetResponse: + ) -> aiwork_space_20210204_models.UpdateDatasetVersionResponse: + """ + @summary 更新指定版本的数据集信息 + + @param request: UpdateDatasetVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateDatasetVersionResponse + """ UtilClient.validate_model(request) body = {} + if not UtilClient.is_unset(request.data_count): + body['DataCount'] = request.data_count + if not UtilClient.is_unset(request.data_size): + body['DataSize'] = request.data_size if not UtilClient.is_unset(request.description): body['Description'] = request.description - if not UtilClient.is_unset(request.name): - body['Name'] = request.name if not UtilClient.is_unset(request.options): body['Options'] = request.options req = open_api_models.OpenApiRequest( @@ -6539,10 +13303,10 @@ async def update_dataset_with_options_async( body=OpenApiUtilClient.parse_to_map(body) ) 
params = open_api_models.Params( - action='UpdateDataset', + action='UpdateDatasetVersion', version='2021-02-04', protocol='HTTPS', - pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}', + pathname=f'/api/v1/datasets/{OpenApiUtilClient.get_encode_param(dataset_id)}/versions/{OpenApiUtilClient.get_encode_param(version_name)}', method='PUT', auth_type='AK', style='ROA', @@ -6550,27 +13314,41 @@ async def update_dataset_with_options_async( body_type='json' ) return TeaCore.from_map( - aiwork_space_20210204_models.UpdateDatasetResponse(), + aiwork_space_20210204_models.UpdateDatasetVersionResponse(), await self.call_api_async(params, req, runtime) ) - def update_dataset( + def update_dataset_version( self, dataset_id: str, - request: aiwork_space_20210204_models.UpdateDatasetRequest, - ) -> aiwork_space_20210204_models.UpdateDatasetResponse: + version_name: str, + request: aiwork_space_20210204_models.UpdateDatasetVersionRequest, + ) -> aiwork_space_20210204_models.UpdateDatasetVersionResponse: + """ + @summary 更新指定版本的数据集信息 + + @param request: UpdateDatasetVersionRequest + @return: UpdateDatasetVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.update_dataset_with_options(dataset_id, request, headers, runtime) + return self.update_dataset_version_with_options(dataset_id, version_name, request, headers, runtime) - async def update_dataset_async( + async def update_dataset_version_async( self, dataset_id: str, - request: aiwork_space_20210204_models.UpdateDatasetRequest, - ) -> aiwork_space_20210204_models.UpdateDatasetResponse: + version_name: str, + request: aiwork_space_20210204_models.UpdateDatasetVersionRequest, + ) -> aiwork_space_20210204_models.UpdateDatasetVersionResponse: + """ + @summary 更新指定版本的数据集信息 + + @param request: UpdateDatasetVersionRequest + @return: UpdateDatasetVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await 
self.update_dataset_with_options_async(dataset_id, request, headers, runtime) + return await self.update_dataset_version_with_options_async(dataset_id, version_name, request, headers, runtime) def update_default_workspace_with_options( self, @@ -6578,6 +13356,14 @@ def update_default_workspace_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateDefaultWorkspaceResponse: + """ + @summary 更新默认工作空间 + + @param request: UpdateDefaultWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateDefaultWorkspaceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.workspace_id): @@ -6608,6 +13394,14 @@ async def update_default_workspace_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateDefaultWorkspaceResponse: + """ + @summary 更新默认工作空间 + + @param request: UpdateDefaultWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateDefaultWorkspaceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.workspace_id): @@ -6636,6 +13430,12 @@ def update_default_workspace( self, request: aiwork_space_20210204_models.UpdateDefaultWorkspaceRequest, ) -> aiwork_space_20210204_models.UpdateDefaultWorkspaceResponse: + """ + @summary 更新默认工作空间 + + @param request: UpdateDefaultWorkspaceRequest + @return: UpdateDefaultWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_default_workspace_with_options(request, headers, runtime) @@ -6644,10 +13444,128 @@ async def update_default_workspace_async( self, request: aiwork_space_20210204_models.UpdateDefaultWorkspaceRequest, ) -> aiwork_space_20210204_models.UpdateDefaultWorkspaceResponse: + """ + @summary 更新默认工作空间 + + @param request: UpdateDefaultWorkspaceRequest + 
@return: UpdateDefaultWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_default_workspace_with_options_async(request, headers, runtime) + def update_experiment_with_options( + self, + experiment_id: str, + request: aiwork_space_20210204_models.UpdateExperimentRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.UpdateExperimentResponse: + """ + @summary 更新实验 + + @param request: UpdateExperimentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateExperimentResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.accessibility): + body['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateExperiment', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.UpdateExperimentResponse(), + self.call_api(params, req, runtime) + ) + + async def update_experiment_with_options_async( + self, + experiment_id: str, + request: aiwork_space_20210204_models.UpdateExperimentRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.UpdateExperimentResponse: + """ + @summary 更新实验 + + @param request: UpdateExperimentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateExperimentResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.accessibility): + body['Accessibility'] = 
request.accessibility + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateExperiment', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/experiments/{OpenApiUtilClient.get_encode_param(experiment_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.UpdateExperimentResponse(), + await self.call_api_async(params, req, runtime) + ) + + def update_experiment( + self, + experiment_id: str, + request: aiwork_space_20210204_models.UpdateExperimentRequest, + ) -> aiwork_space_20210204_models.UpdateExperimentResponse: + """ + @summary 更新实验 + + @param request: UpdateExperimentRequest + @return: UpdateExperimentResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.update_experiment_with_options(experiment_id, request, headers, runtime) + + async def update_experiment_async( + self, + experiment_id: str, + request: aiwork_space_20210204_models.UpdateExperimentRequest, + ) -> aiwork_space_20210204_models.UpdateExperimentResponse: + """ + @summary 更新实验 + + @param request: UpdateExperimentRequest + @return: UpdateExperimentResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.update_experiment_with_options_async(experiment_id, request, headers, runtime) + def update_model_with_options( self, model_id: str, @@ -6655,18 +13573,32 @@ def update_model_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateModelResponse: + """ + @summary 更新模型 + + @param request: UpdateModelRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateModelResponse + """ UtilClient.validate_model(request) body = {} if not 
UtilClient.is_unset(request.accessibility): body['Accessibility'] = request.accessibility if not UtilClient.is_unset(request.domain): body['Domain'] = request.domain + if not UtilClient.is_unset(request.extra_info): + body['ExtraInfo'] = request.extra_info if not UtilClient.is_unset(request.model_description): body['ModelDescription'] = request.model_description if not UtilClient.is_unset(request.model_doc): body['ModelDoc'] = request.model_doc if not UtilClient.is_unset(request.model_name): body['ModelName'] = request.model_name + if not UtilClient.is_unset(request.model_type): + body['ModelType'] = request.model_type + if not UtilClient.is_unset(request.order_number): + body['OrderNumber'] = request.order_number if not UtilClient.is_unset(request.origin): body['Origin'] = request.origin if not UtilClient.is_unset(request.task): @@ -6698,18 +13630,32 @@ async def update_model_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateModelResponse: + """ + @summary 更新模型 + + @param request: UpdateModelRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateModelResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.accessibility): body['Accessibility'] = request.accessibility if not UtilClient.is_unset(request.domain): body['Domain'] = request.domain + if not UtilClient.is_unset(request.extra_info): + body['ExtraInfo'] = request.extra_info if not UtilClient.is_unset(request.model_description): body['ModelDescription'] = request.model_description if not UtilClient.is_unset(request.model_doc): body['ModelDoc'] = request.model_doc if not UtilClient.is_unset(request.model_name): body['ModelName'] = request.model_name + if not UtilClient.is_unset(request.model_type): + body['ModelType'] = request.model_type + if not UtilClient.is_unset(request.order_number): + body['OrderNumber'] = request.order_number if not 
UtilClient.is_unset(request.origin): body['Origin'] = request.origin if not UtilClient.is_unset(request.task): @@ -6739,6 +13685,12 @@ def update_model( model_id: str, request: aiwork_space_20210204_models.UpdateModelRequest, ) -> aiwork_space_20210204_models.UpdateModelResponse: + """ + @summary 更新模型 + + @param request: UpdateModelRequest + @return: UpdateModelResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_model_with_options(model_id, request, headers, runtime) @@ -6748,6 +13700,12 @@ async def update_model_async( model_id: str, request: aiwork_space_20210204_models.UpdateModelRequest, ) -> aiwork_space_20210204_models.UpdateModelResponse: + """ + @summary 更新模型 + + @param request: UpdateModelRequest + @return: UpdateModelResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_model_with_options_async(model_id, request, headers, runtime) @@ -6758,6 +13716,14 @@ def update_model_domains_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateModelDomainsResponse: + """ + @summary 更新模型领域 + + @param request: UpdateModelDomainsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateModelDomainsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.model_domains): @@ -6788,6 +13754,14 @@ async def update_model_domains_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateModelDomainsResponse: + """ + @summary 更新模型领域 + + @param request: UpdateModelDomainsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateModelDomainsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.model_domains): @@ -6816,6 +13790,12 @@ def update_model_domains( self, request: 
aiwork_space_20210204_models.UpdateModelDomainsRequest, ) -> aiwork_space_20210204_models.UpdateModelDomainsResponse: + """ + @summary 更新模型领域 + + @param request: UpdateModelDomainsRequest + @return: UpdateModelDomainsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_model_domains_with_options(request, headers, runtime) @@ -6824,6 +13804,12 @@ async def update_model_domains_async( self, request: aiwork_space_20210204_models.UpdateModelDomainsRequest, ) -> aiwork_space_20210204_models.UpdateModelDomainsResponse: + """ + @summary 更新模型领域 + + @param request: UpdateModelDomainsRequest + @return: UpdateModelDomainsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_model_domains_with_options_async(request, headers, runtime) @@ -6836,10 +13822,24 @@ def update_model_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateModelVersionResponse: + """ + @summary 更新模型版本 + + @param request: UpdateModelVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateModelVersionResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.approval_status): body['ApprovalStatus'] = request.approval_status + if not UtilClient.is_unset(request.compression_spec): + body['CompressionSpec'] = request.compression_spec + if not UtilClient.is_unset(request.evaluation_spec): + body['EvaluationSpec'] = request.evaluation_spec + if not UtilClient.is_unset(request.extra_info): + body['ExtraInfo'] = request.extra_info if not UtilClient.is_unset(request.inference_spec): body['InferenceSpec'] = request.inference_spec if not UtilClient.is_unset(request.metrics): @@ -6882,10 +13882,24 @@ async def update_model_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateModelVersionResponse: + """ + 
@summary 更新模型版本 + + @param request: UpdateModelVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateModelVersionResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.approval_status): body['ApprovalStatus'] = request.approval_status + if not UtilClient.is_unset(request.compression_spec): + body['CompressionSpec'] = request.compression_spec + if not UtilClient.is_unset(request.evaluation_spec): + body['EvaluationSpec'] = request.evaluation_spec + if not UtilClient.is_unset(request.extra_info): + body['ExtraInfo'] = request.extra_info if not UtilClient.is_unset(request.inference_spec): body['InferenceSpec'] = request.inference_spec if not UtilClient.is_unset(request.metrics): @@ -6926,6 +13940,12 @@ def update_model_version( version_name: str, request: aiwork_space_20210204_models.UpdateModelVersionRequest, ) -> aiwork_space_20210204_models.UpdateModelVersionResponse: + """ + @summary 更新模型版本 + + @param request: UpdateModelVersionRequest + @return: UpdateModelVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_model_version_with_options(model_id, version_name, request, headers, runtime) @@ -6936,10 +13956,140 @@ async def update_model_version_async( version_name: str, request: aiwork_space_20210204_models.UpdateModelVersionRequest, ) -> aiwork_space_20210204_models.UpdateModelVersionResponse: + """ + @summary 更新模型版本 + + @param request: UpdateModelVersionRequest + @return: UpdateModelVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_model_version_with_options_async(model_id, version_name, request, headers, runtime) + def update_service_template_with_options( + self, + service_template_id: str, + request: aiwork_space_20210204_models.UpdateServiceTemplateRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> 
aiwork_space_20210204_models.UpdateServiceTemplateResponse: + """ + @summary 更新服务模版 + + @param request: UpdateServiceTemplateRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateServiceTemplateResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.inference_spec): + body['InferenceSpec'] = request.inference_spec + if not UtilClient.is_unset(request.order_number): + body['OrderNumber'] = request.order_number + if not UtilClient.is_unset(request.service_template_description): + body['ServiceTemplateDescription'] = request.service_template_description + if not UtilClient.is_unset(request.service_template_doc): + body['ServiceTemplateDoc'] = request.service_template_doc + if not UtilClient.is_unset(request.service_template_name): + body['ServiceTemplateName'] = request.service_template_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateServiceTemplate', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.UpdateServiceTemplateResponse(), + self.call_api(params, req, runtime) + ) + + async def update_service_template_with_options_async( + self, + service_template_id: str, + request: aiwork_space_20210204_models.UpdateServiceTemplateRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.UpdateServiceTemplateResponse: + """ + @summary 更新服务模版 + + @param request: UpdateServiceTemplateRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateServiceTemplateResponse + """ + UtilClient.validate_model(request) + body 
= {} + if not UtilClient.is_unset(request.inference_spec): + body['InferenceSpec'] = request.inference_spec + if not UtilClient.is_unset(request.order_number): + body['OrderNumber'] = request.order_number + if not UtilClient.is_unset(request.service_template_description): + body['ServiceTemplateDescription'] = request.service_template_description + if not UtilClient.is_unset(request.service_template_doc): + body['ServiceTemplateDoc'] = request.service_template_doc + if not UtilClient.is_unset(request.service_template_name): + body['ServiceTemplateName'] = request.service_template_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateServiceTemplate', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/servicetemplates/{OpenApiUtilClient.get_encode_param(service_template_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.UpdateServiceTemplateResponse(), + await self.call_api_async(params, req, runtime) + ) + + def update_service_template( + self, + service_template_id: str, + request: aiwork_space_20210204_models.UpdateServiceTemplateRequest, + ) -> aiwork_space_20210204_models.UpdateServiceTemplateResponse: + """ + @summary 更新服务模版 + + @param request: UpdateServiceTemplateRequest + @return: UpdateServiceTemplateResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.update_service_template_with_options(service_template_id, request, headers, runtime) + + async def update_service_template_async( + self, + service_template_id: str, + request: aiwork_space_20210204_models.UpdateServiceTemplateRequest, + ) -> aiwork_space_20210204_models.UpdateServiceTemplateResponse: + """ + @summary 更新服务模版 + + @param request: UpdateServiceTemplateRequest + @return: UpdateServiceTemplateResponse + """ + runtime = 
util_models.RuntimeOptions() + headers = {} + return await self.update_service_template_with_options_async(service_template_id, request, headers, runtime) + def update_workspace_with_options( self, workspace_id: str, @@ -6947,6 +14097,14 @@ def update_workspace_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateWorkspaceResponse: + """ + @summary 更新工作空间 + + @param request: UpdateWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateWorkspaceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): @@ -6980,6 +14138,14 @@ async def update_workspace_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateWorkspaceResponse: + """ + @summary 更新工作空间 + + @param request: UpdateWorkspaceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateWorkspaceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): @@ -7011,6 +14177,12 @@ def update_workspace( workspace_id: str, request: aiwork_space_20210204_models.UpdateWorkspaceRequest, ) -> aiwork_space_20210204_models.UpdateWorkspaceResponse: + """ + @summary 更新工作空间 + + @param request: UpdateWorkspaceRequest + @return: UpdateWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_workspace_with_options(workspace_id, request, headers, runtime) @@ -7020,6 +14192,12 @@ async def update_workspace_async( workspace_id: str, request: aiwork_space_20210204_models.UpdateWorkspaceRequest, ) -> aiwork_space_20210204_models.UpdateWorkspaceResponse: + """ + @summary 更新工作空间 + + @param request: UpdateWorkspaceRequest + @return: UpdateWorkspaceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await 
self.update_workspace_with_options_async(workspace_id, request, headers, runtime) @@ -7031,16 +14209,30 @@ def update_workspace_resource_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateWorkspaceResourceResponse: + """ + @summary 更新工作空间资源 + + @param request: UpdateWorkspaceResourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateWorkspaceResourceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.group_name): body['GroupName'] = request.group_name if not UtilClient.is_unset(request.is_default): body['IsDefault'] = request.is_default + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels if not UtilClient.is_unset(request.product_type): body['ProductType'] = request.product_type + if not UtilClient.is_unset(request.resource_ids): + body['ResourceIds'] = request.resource_ids if not UtilClient.is_unset(request.resource_type): body['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.spec): + body['Spec'] = request.spec req = open_api_models.OpenApiRequest( headers=headers, body=OpenApiUtilClient.parse_to_map(body) @@ -7068,16 +14260,30 @@ async def update_workspace_resource_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> aiwork_space_20210204_models.UpdateWorkspaceResourceResponse: + """ + @summary 更新工作空间资源 + + @param request: UpdateWorkspaceResourceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateWorkspaceResourceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.group_name): body['GroupName'] = request.group_name if not UtilClient.is_unset(request.is_default): body['IsDefault'] = request.is_default + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels if not 
UtilClient.is_unset(request.product_type): body['ProductType'] = request.product_type + if not UtilClient.is_unset(request.resource_ids): + body['ResourceIds'] = request.resource_ids if not UtilClient.is_unset(request.resource_type): body['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.spec): + body['Spec'] = request.spec req = open_api_models.OpenApiRequest( headers=headers, body=OpenApiUtilClient.parse_to_map(body) @@ -7103,6 +14309,12 @@ def update_workspace_resource( workspace_id: str, request: aiwork_space_20210204_models.UpdateWorkspaceResourceRequest, ) -> aiwork_space_20210204_models.UpdateWorkspaceResourceResponse: + """ + @summary 更新工作空间资源 + + @param request: UpdateWorkspaceResourceRequest + @return: UpdateWorkspaceResourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_workspace_resource_with_options(workspace_id, request, headers, runtime) @@ -7112,6 +14324,128 @@ async def update_workspace_resource_async( workspace_id: str, request: aiwork_space_20210204_models.UpdateWorkspaceResourceRequest, ) -> aiwork_space_20210204_models.UpdateWorkspaceResourceResponse: + """ + @summary 更新工作空间资源 + + @param request: UpdateWorkspaceResourceRequest + @return: UpdateWorkspaceResourceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_workspace_resource_with_options_async(workspace_id, request, headers, runtime) + + def update_workspace_role_with_options( + self, + workspace_id: str, + role_id: str, + request: aiwork_space_20210204_models.UpdateWorkspaceRoleRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.UpdateWorkspaceRoleResponse: + """ + @summary 更新工作空间角色 + + @param request: UpdateWorkspaceRoleRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateWorkspaceRoleResponse + """ + UtilClient.validate_model(request) + body = {} + if not 
UtilClient.is_unset(request.module_permissions): + body['ModulePermissions'] = request.module_permissions + if not UtilClient.is_unset(request.role_name): + body['RoleName'] = request.role_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateWorkspaceRole', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/roles/{OpenApiUtilClient.get_encode_param(role_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + aiwork_space_20210204_models.UpdateWorkspaceRoleResponse(), + self.call_api(params, req, runtime) + ) + + async def update_workspace_role_with_options_async( + self, + workspace_id: str, + role_id: str, + request: aiwork_space_20210204_models.UpdateWorkspaceRoleRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> aiwork_space_20210204_models.UpdateWorkspaceRoleResponse: + """ + @summary 更新工作空间角色 + + @param request: UpdateWorkspaceRoleRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateWorkspaceRoleResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.module_permissions): + body['ModulePermissions'] = request.module_permissions + if not UtilClient.is_unset(request.role_name): + body['RoleName'] = request.role_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateWorkspaceRole', + version='2021-02-04', + protocol='HTTPS', + pathname=f'/api/v1/workspaces/{OpenApiUtilClient.get_encode_param(workspace_id)}/roles/{OpenApiUtilClient.get_encode_param(role_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return 
TeaCore.from_map( + aiwork_space_20210204_models.UpdateWorkspaceRoleResponse(), + await self.call_api_async(params, req, runtime) + ) + + def update_workspace_role( + self, + workspace_id: str, + role_id: str, + request: aiwork_space_20210204_models.UpdateWorkspaceRoleRequest, + ) -> aiwork_space_20210204_models.UpdateWorkspaceRoleResponse: + """ + @summary 更新工作空间角色 + + @param request: UpdateWorkspaceRoleRequest + @return: UpdateWorkspaceRoleResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.update_workspace_role_with_options(workspace_id, role_id, request, headers, runtime) + + async def update_workspace_role_async( + self, + workspace_id: str, + role_id: str, + request: aiwork_space_20210204_models.UpdateWorkspaceRoleRequest, + ) -> aiwork_space_20210204_models.UpdateWorkspaceRoleResponse: + """ + @summary 更新工作空间角色 + + @param request: UpdateWorkspaceRoleRequest + @return: UpdateWorkspaceRoleResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.update_workspace_role_with_options_async(workspace_id, role_id, request, headers, runtime) diff --git a/pai/libs/alibabacloud_aiworkspace20210204/models.py b/pai/libs/alibabacloud_aiworkspace20210204/models.py index 744f94c..db46cb0 100644 --- a/pai/libs/alibabacloud_aiworkspace20210204/models.py +++ b/pai/libs/alibabacloud_aiworkspace20210204/models.py @@ -109,6 +109,57 @@ def from_map(self, m: dict = None): return self +class Collection(TeaModel): + def __init__( + self, + collection_name: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + owner_id: str = None, + user_id: str = None, + ): + self.collection_name = collection_name + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.owner_id = owner_id + self.user_id = user_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.collection_name is 
not None: + result['CollectionName'] = self.collection_name + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.owner_id is not None: + result['OwnerId'] = self.owner_id + if self.user_id is not None: + result['UserId'] = self.user_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CollectionName') is not None: + self.collection_name = m.get('CollectionName') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + return self + + class Label(TeaModel): def __init__( self, @@ -142,6 +193,113 @@ def from_map(self, m: dict = None): return self +class DatasetVersion(TeaModel): + def __init__( + self, + data_count: int = None, + data_size: int = None, + data_source_type: str = None, + description: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + labels: List[Label] = None, + options: str = None, + property: str = None, + source_id: str = None, + source_type: str = None, + uri: str = None, + version_name: str = None, + ): + self.data_count = data_count + self.data_size = data_size + self.data_source_type = data_source_type + self.description = description + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.labels = labels + self.options = options + self.property = property + self.source_id = source_id + self.source_type = source_type + self.uri = uri + self.version_name = version_name + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result 
= dict() + if self.data_count is not None: + result['DataCount'] = self.data_count + if self.data_size is not None: + result['DataSize'] = self.data_size + if self.data_source_type is not None: + result['DataSourceType'] = self.data_source_type + if self.description is not None: + result['Description'] = self.description + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.options is not None: + result['Options'] = self.options + if self.property is not None: + result['Property'] = self.property + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.uri is not None: + result['Uri'] = self.uri + if self.version_name is not None: + result['VersionName'] = self.version_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DataCount') is not None: + self.data_count = m.get('DataCount') + if m.get('DataSize') is not None: + self.data_size = m.get('DataSize') + if m.get('DataSourceType') is not None: + self.data_source_type = m.get('DataSourceType') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Options') is not None: + self.options = m.get('Options') + if m.get('Property') is not None: + self.property = m.get('Property') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + 
if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('Uri') is not None: + self.uri = m.get('Uri') + if m.get('VersionName') is not None: + self.version_name = m.get('VersionName') + return self + + class Dataset(TeaModel): def __init__( self, @@ -153,13 +311,17 @@ def __init__( gmt_create_time: str = None, gmt_modified_time: str = None, labels: List[Label] = None, + latest_version: DatasetVersion = None, name: str = None, options: str = None, owner_id: str = None, property: str = None, provider_type: str = None, + source_dataset_id: str = None, + source_dataset_version: str = None, source_id: str = None, source_type: str = None, + tag_template_type: str = None, uri: str = None, user_id: str = None, workspace_id: str = None, @@ -172,13 +334,17 @@ def __init__( self.gmt_create_time = gmt_create_time self.gmt_modified_time = gmt_modified_time self.labels = labels + self.latest_version = latest_version self.name = name self.options = options self.owner_id = owner_id self.property = property self.provider_type = provider_type + self.source_dataset_id = source_dataset_id + self.source_dataset_version = source_dataset_version self.source_id = source_id self.source_type = source_type + self.tag_template_type = tag_template_type self.uri = uri self.user_id = user_id self.workspace_id = workspace_id @@ -188,6 +354,8 @@ def validate(self): for k in self.labels: if k: k.validate() + if self.latest_version: + self.latest_version.validate() def to_map(self): _map = super().to_map() @@ -213,6 +381,8 @@ def to_map(self): if self.labels is not None: for k in self.labels: result['Labels'].append(k.to_map() if k else None) + if self.latest_version is not None: + result['LatestVersion'] = self.latest_version.to_map() if self.name is not None: result['Name'] = self.name if self.options is not None: @@ -223,10 +393,16 @@ def to_map(self): result['Property'] = self.property if self.provider_type is not None: result['ProviderType'] = self.provider_type 
+ if self.source_dataset_id is not None: + result['SourceDatasetId'] = self.source_dataset_id + if self.source_dataset_version is not None: + result['SourceDatasetVersion'] = self.source_dataset_version if self.source_id is not None: result['SourceId'] = self.source_id if self.source_type is not None: result['SourceType'] = self.source_type + if self.tag_template_type is not None: + result['TagTemplateType'] = self.tag_template_type if self.uri is not None: result['Uri'] = self.uri if self.user_id is not None: @@ -256,6 +432,9 @@ def from_map(self, m: dict = None): for k in m.get('Labels'): temp_model = Label() self.labels.append(temp_model.from_map(k)) + if m.get('LatestVersion') is not None: + temp_model = DatasetVersion() + self.latest_version = temp_model.from_map(m['LatestVersion']) if m.get('Name') is not None: self.name = m.get('Name') if m.get('Options') is not None: @@ -266,10 +445,16 @@ def from_map(self, m: dict = None): self.property = m.get('Property') if m.get('ProviderType') is not None: self.provider_type = m.get('ProviderType') + if m.get('SourceDatasetId') is not None: + self.source_dataset_id = m.get('SourceDatasetId') + if m.get('SourceDatasetVersion') is not None: + self.source_dataset_version = m.get('SourceDatasetVersion') if m.get('SourceId') is not None: self.source_id = m.get('SourceId') if m.get('SourceType') is not None: self.source_type = m.get('SourceType') + if m.get('TagTemplateType') is not None: + self.tag_template_type = m.get('TagTemplateType') if m.get('Uri') is not None: self.uri = m.get('Uri') if m.get('UserId') is not None: @@ -312,50 +497,33 @@ def from_map(self, m: dict = None): return self -class ModelVersion(TeaModel): +class Experiment(TeaModel): def __init__( self, - approval_status: str = None, - format_type: str = None, - framework_type: str = None, + artifact_uri: str = None, + experiment_id: str = None, gmt_create_time: str = None, gmt_modified_time: str = None, - inference_spec: Dict[str, Any] = None, - labels: 
List[Label] = None, - metrics: Dict[str, Any] = None, - options: str = None, + labels: List[Dict[str, Any]] = None, + name: str = None, owner_id: str = None, - source_id: str = None, - source_type: str = None, - training_spec: Dict[str, Any] = None, - uri: str = None, + tensorboard_log_uri: str = None, user_id: str = None, - version_description: str = None, - version_name: str = None, + workspace_id: str = None, ): - self.approval_status = approval_status - self.format_type = format_type - self.framework_type = framework_type + self.artifact_uri = artifact_uri + self.experiment_id = experiment_id self.gmt_create_time = gmt_create_time self.gmt_modified_time = gmt_modified_time - self.inference_spec = inference_spec self.labels = labels - self.metrics = metrics - self.options = options + self.name = name self.owner_id = owner_id - self.source_id = source_id - self.source_type = source_type - self.training_spec = training_spec - self.uri = uri + self.tensorboard_log_uri = tensorboard_log_uri self.user_id = user_id - self.version_description = version_description - self.version_name = version_name + self.workspace_id = workspace_id def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -363,130 +531,70 @@ def to_map(self): return _map result = dict() - if self.approval_status is not None: - result['ApprovalStatus'] = self.approval_status - if self.format_type is not None: - result['FormatType'] = self.format_type - if self.framework_type is not None: - result['FrameworkType'] = self.framework_type + if self.artifact_uri is not None: + result['ArtifactUri'] = self.artifact_uri + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id if self.gmt_create_time is not None: result['GmtCreateTime'] = self.gmt_create_time if self.gmt_modified_time is not None: result['GmtModifiedTime'] = self.gmt_modified_time - if self.inference_spec is not None: - 
result['InferenceSpec'] = self.inference_spec - result['Labels'] = [] if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.metrics is not None: - result['Metrics'] = self.metrics - if self.options is not None: - result['Options'] = self.options + result['Labels'] = self.labels + if self.name is not None: + result['Name'] = self.name if self.owner_id is not None: result['OwnerId'] = self.owner_id - if self.source_id is not None: - result['SourceId'] = self.source_id - if self.source_type is not None: - result['SourceType'] = self.source_type - if self.training_spec is not None: - result['TrainingSpec'] = self.training_spec - if self.uri is not None: - result['Uri'] = self.uri + if self.tensorboard_log_uri is not None: + result['TensorboardLogUri'] = self.tensorboard_log_uri if self.user_id is not None: result['UserId'] = self.user_id - if self.version_description is not None: - result['VersionDescription'] = self.version_description - if self.version_name is not None: - result['VersionName'] = self.version_name + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ApprovalStatus') is not None: - self.approval_status = m.get('ApprovalStatus') - if m.get('FormatType') is not None: - self.format_type = m.get('FormatType') - if m.get('FrameworkType') is not None: - self.framework_type = m.get('FrameworkType') + if m.get('ArtifactUri') is not None: + self.artifact_uri = m.get('ArtifactUri') + if m.get('ExperimentId') is not None: + self.experiment_id = m.get('ExperimentId') if m.get('GmtCreateTime') is not None: self.gmt_create_time = m.get('GmtCreateTime') if m.get('GmtModifiedTime') is not None: self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('InferenceSpec') is not None: - self.inference_spec = m.get('InferenceSpec') - self.labels = [] if m.get('Labels') is not None: - for k in 
m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('Metrics') is not None: - self.metrics = m.get('Metrics') - if m.get('Options') is not None: - self.options = m.get('Options') + self.labels = m.get('Labels') + if m.get('Name') is not None: + self.name = m.get('Name') if m.get('OwnerId') is not None: self.owner_id = m.get('OwnerId') - if m.get('SourceId') is not None: - self.source_id = m.get('SourceId') - if m.get('SourceType') is not None: - self.source_type = m.get('SourceType') - if m.get('TrainingSpec') is not None: - self.training_spec = m.get('TrainingSpec') - if m.get('Uri') is not None: - self.uri = m.get('Uri') + if m.get('TensorboardLogUri') is not None: + self.tensorboard_log_uri = m.get('TensorboardLogUri') if m.get('UserId') is not None: self.user_id = m.get('UserId') - if m.get('VersionDescription') is not None: - self.version_description = m.get('VersionDescription') - if m.get('VersionName') is not None: - self.version_name = m.get('VersionName') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class Model(TeaModel): +class ExperimentLabel(TeaModel): def __init__( self, - accessibility: str = None, - domain: str = None, + experiment_id: str = None, gmt_create_time: str = None, gmt_modified_time: str = None, - labels: List[Label] = None, - latest_version: ModelVersion = None, - model_description: str = None, - model_doc: str = None, - model_id: str = None, - model_name: str = None, - origin: str = None, - owner_id: str = None, - provider: str = None, - task: str = None, - user_id: str = None, - workspace_id: str = None, + key: str = None, + value: str = None, ): - self.accessibility = accessibility - self.domain = domain + self.experiment_id = experiment_id self.gmt_create_time = gmt_create_time self.gmt_modified_time = gmt_modified_time - self.labels = labels - self.latest_version = latest_version - self.model_description = model_description - self.model_doc 
= model_doc - self.model_id = model_id - self.model_name = model_name - self.origin = origin - self.owner_id = owner_id - self.provider = provider - self.task = task - self.user_id = user_id - self.workspace_id = workspace_id + self.key = key + self.value = value def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - if self.latest_version: - self.latest_version.validate() + pass def to_map(self): _map = super().to_map() @@ -494,111 +602,34 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.domain is not None: - result['Domain'] = self.domain + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id if self.gmt_create_time is not None: result['GmtCreateTime'] = self.gmt_create_time if self.gmt_modified_time is not None: result['GmtModifiedTime'] = self.gmt_modified_time - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.latest_version is not None: - result['LatestVersion'] = self.latest_version.to_map() - if self.model_description is not None: - result['ModelDescription'] = self.model_description - if self.model_doc is not None: - result['ModelDoc'] = self.model_doc - if self.model_id is not None: - result['ModelId'] = self.model_id - if self.model_name is not None: - result['ModelName'] = self.model_name - if self.origin is not None: - result['Origin'] = self.origin - if self.owner_id is not None: - result['OwnerId'] = self.owner_id - if self.provider is not None: - result['Provider'] = self.provider - if self.task is not None: - result['Task'] = self.task - if self.user_id is not None: - result['UserId'] = self.user_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def 
from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('Domain') is not None: - self.domain = m.get('Domain') + if m.get('ExperimentId') is not None: + self.experiment_id = m.get('ExperimentId') if m.get('GmtCreateTime') is not None: self.gmt_create_time = m.get('GmtCreateTime') if m.get('GmtModifiedTime') is not None: self.gmt_modified_time = m.get('GmtModifiedTime') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('LatestVersion') is not None: - temp_model = ModelVersion() - self.latest_version = temp_model.from_map(m['LatestVersion']) - if m.get('ModelDescription') is not None: - self.model_description = m.get('ModelDescription') - if m.get('ModelDoc') is not None: - self.model_doc = m.get('ModelDoc') - if m.get('ModelId') is not None: - self.model_id = m.get('ModelId') - if m.get('ModelName') is not None: - self.model_name = m.get('ModelName') - if m.get('Origin') is not None: - self.origin = m.get('Origin') - if m.get('OwnerId') is not None: - self.owner_id = m.get('OwnerId') - if m.get('Provider') is not None: - self.provider = m.get('Provider') - if m.get('Task') is not None: - self.task = m.get('Task') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - return self - - -class ResourcesExecutorValue(TeaModel): - def __init__( - self, - owner_id: str = None, - ): - self.owner_id = owner_id - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.owner_id is not None: - result['OwnerId'] = self.owner_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('OwnerId') is not None: - self.owner_id = m.get('OwnerId') + if m.get('Key') is not None: + 
self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class AddImageRequestLabels(TeaModel): +class LabelInfo(TeaModel): def __init__( self, key: str = None, @@ -631,28 +662,21 @@ def from_map(self, m: dict = None): return self -class AddImageRequest(TeaModel): +class LineageEntity(TeaModel): def __init__( self, - accessibility: str = None, - description: str = None, - image_uri: str = None, - labels: List[AddImageRequestLabels] = None, + attributes: Dict[str, Any] = None, + entity_type: str = None, name: str = None, - workspace_id: str = None, + qualified_name: str = None, ): - self.accessibility = accessibility - self.description = description - self.image_uri = image_uri - self.labels = labels + self.attributes = attributes + self.entity_type = entity_type self.name = name - self.workspace_id = workspace_id + self.qualified_name = qualified_name def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -660,50 +684,39 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.description is not None: - result['Description'] = self.description - if self.image_uri is not None: - result['ImageUri'] = self.image_uri - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) + if self.attributes is not None: + result['Attributes'] = self.attributes + if self.entity_type is not None: + result['EntityType'] = self.entity_type if self.name is not None: result['Name'] = self.name - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.qualified_name is not None: + result['QualifiedName'] = self.qualified_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - 
if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('ImageUri') is not None: - self.image_uri = m.get('ImageUri') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = AddImageRequestLabels() - self.labels.append(temp_model.from_map(k)) + if m.get('Attributes') is not None: + self.attributes = m.get('Attributes') + if m.get('EntityType') is not None: + self.entity_type = m.get('EntityType') if m.get('Name') is not None: self.name = m.get('Name') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('QualifiedName') is not None: + self.qualified_name = m.get('QualifiedName') return self -class AddImageResponseBody(TeaModel): +class LineageRelation(TeaModel): def __init__( self, - image_id: str = None, - request_id: str = None, + dest_entity_qualified_name: str = None, + relationship_guid: str = None, + src_entity_qualified_name: str = None, ): - self.image_id = image_id - self.request_id = request_id + self.dest_entity_qualified_name = dest_entity_qualified_name + self.relationship_guid = relationship_guid + self.src_entity_qualified_name = src_entity_qualified_name def validate(self): pass @@ -714,38 +727,75 @@ def to_map(self): return _map result = dict() - if self.image_id is not None: - result['ImageId'] = self.image_id - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.dest_entity_qualified_name is not None: + result['DestEntityQualifiedName'] = self.dest_entity_qualified_name + if self.relationship_guid is not None: + result['RelationshipGuid'] = self.relationship_guid + if self.src_entity_qualified_name is not None: + result['SrcEntityQualifiedName'] = self.src_entity_qualified_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ImageId') is not None: - self.image_id = m.get('ImageId') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if 
m.get('DestEntityQualifiedName') is not None: + self.dest_entity_qualified_name = m.get('DestEntityQualifiedName') + if m.get('RelationshipGuid') is not None: + self.relationship_guid = m.get('RelationshipGuid') + if m.get('SrcEntityQualifiedName') is not None: + self.src_entity_qualified_name = m.get('SrcEntityQualifiedName') return self -class AddImageResponse(TeaModel): +class ModelVersion(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: AddImageResponseBody = None, + approval_status: str = None, + compression_spec: Dict[str, Any] = None, + evaluation_spec: Dict[str, Any] = None, + extra_info: Dict[str, Any] = None, + format_type: str = None, + framework_type: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + inference_spec: Dict[str, Any] = None, + labels: List[Label] = None, + metrics: Dict[str, Any] = None, + options: str = None, + owner_id: str = None, + source_id: str = None, + source_type: str = None, + training_spec: Dict[str, Any] = None, + uri: str = None, + user_id: str = None, + version_description: str = None, + version_name: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.approval_status = approval_status + self.compression_spec = compression_spec + self.evaluation_spec = evaluation_spec + self.extra_info = extra_info + self.format_type = format_type + self.framework_type = framework_type + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.inference_spec = inference_spec + self.labels = labels + self.metrics = metrics + self.options = options + self.owner_id = owner_id + self.source_id = source_id + self.source_type = source_type + self.training_spec = training_spec + self.uri = uri + self.user_id = user_id + self.version_description = version_description + self.version_name = version_name def validate(self): - self.validate_required(self.headers, 'headers') - 
self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -753,33 +803,6587 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = AddImageResponseBody() - self.body = temp_model.from_map(m['body']) - return self - - -class AddImageLabelsRequestLabels(TeaModel): - def __init__( + if self.approval_status is not None: + result['ApprovalStatus'] = self.approval_status + if self.compression_spec is not None: + result['CompressionSpec'] = self.compression_spec + if self.evaluation_spec is not None: + result['EvaluationSpec'] = self.evaluation_spec + if self.extra_info is not None: + result['ExtraInfo'] = self.extra_info + if self.format_type is not None: + result['FormatType'] = self.format_type + if self.framework_type is not None: + result['FrameworkType'] = self.framework_type + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.metrics is not None: + result['Metrics'] = self.metrics + if self.options is not None: + result['Options'] = self.options + if self.owner_id 
is not None: + result['OwnerId'] = self.owner_id + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.training_spec is not None: + result['TrainingSpec'] = self.training_spec + if self.uri is not None: + result['Uri'] = self.uri + if self.user_id is not None: + result['UserId'] = self.user_id + if self.version_description is not None: + result['VersionDescription'] = self.version_description + if self.version_name is not None: + result['VersionName'] = self.version_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ApprovalStatus') is not None: + self.approval_status = m.get('ApprovalStatus') + if m.get('CompressionSpec') is not None: + self.compression_spec = m.get('CompressionSpec') + if m.get('EvaluationSpec') is not None: + self.evaluation_spec = m.get('EvaluationSpec') + if m.get('ExtraInfo') is not None: + self.extra_info = m.get('ExtraInfo') + if m.get('FormatType') is not None: + self.format_type = m.get('FormatType') + if m.get('FrameworkType') is not None: + self.framework_type = m.get('FrameworkType') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('InferenceSpec') is not None: + self.inference_spec = m.get('InferenceSpec') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Metrics') is not None: + self.metrics = m.get('Metrics') + if m.get('Options') is not None: + self.options = m.get('Options') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('TrainingSpec') is not None: + 
self.training_spec = m.get('TrainingSpec') + if m.get('Uri') is not None: + self.uri = m.get('Uri') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('VersionDescription') is not None: + self.version_description = m.get('VersionDescription') + if m.get('VersionName') is not None: + self.version_name = m.get('VersionName') + return self + + +class Model(TeaModel): + def __init__( + self, + accessibility: str = None, + domain: str = None, + extra_info: Dict[str, Any] = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + labels: List[Label] = None, + latest_version: ModelVersion = None, + model_description: str = None, + model_doc: str = None, + model_id: str = None, + model_name: str = None, + model_type: str = None, + order_number: int = None, + origin: str = None, + owner_id: str = None, + provider: str = None, + task: str = None, + user_id: str = None, + workspace_id: str = None, + ): + self.accessibility = accessibility + self.domain = domain + self.extra_info = extra_info + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.labels = labels + self.latest_version = latest_version + self.model_description = model_description + self.model_doc = model_doc + self.model_id = model_id + self.model_name = model_name + self.model_type = model_type + self.order_number = order_number + self.origin = origin + self.owner_id = owner_id + self.provider = provider + self.task = task + self.user_id = user_id + self.workspace_id = workspace_id + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.latest_version: + self.latest_version.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.domain is not None: + result['Domain'] = self.domain + if self.extra_info is not None: + result['ExtraInfo'] = 
self.extra_info + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.latest_version is not None: + result['LatestVersion'] = self.latest_version.to_map() + if self.model_description is not None: + result['ModelDescription'] = self.model_description + if self.model_doc is not None: + result['ModelDoc'] = self.model_doc + if self.model_id is not None: + result['ModelId'] = self.model_id + if self.model_name is not None: + result['ModelName'] = self.model_name + if self.model_type is not None: + result['ModelType'] = self.model_type + if self.order_number is not None: + result['OrderNumber'] = self.order_number + if self.origin is not None: + result['Origin'] = self.origin + if self.owner_id is not None: + result['OwnerId'] = self.owner_id + if self.provider is not None: + result['Provider'] = self.provider + if self.task is not None: + result['Task'] = self.task + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('Domain') is not None: + self.domain = m.get('Domain') + if m.get('ExtraInfo') is not None: + self.extra_info = m.get('ExtraInfo') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('LatestVersion') is not None: + temp_model = ModelVersion() + 
self.latest_version = temp_model.from_map(m['LatestVersion']) + if m.get('ModelDescription') is not None: + self.model_description = m.get('ModelDescription') + if m.get('ModelDoc') is not None: + self.model_doc = m.get('ModelDoc') + if m.get('ModelId') is not None: + self.model_id = m.get('ModelId') + if m.get('ModelName') is not None: + self.model_name = m.get('ModelName') + if m.get('ModelType') is not None: + self.model_type = m.get('ModelType') + if m.get('OrderNumber') is not None: + self.order_number = m.get('OrderNumber') + if m.get('Origin') is not None: + self.origin = m.get('Origin') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('Task') is not None: + self.task = m.get('Task') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class ServiceTemplate(TeaModel): + def __init__( + self, + gmt_create_time: str = None, + gmt_modified_time: str = None, + inference_spec: Dict[str, Any] = None, + labels: List[Label] = None, + order_number: int = None, + owner_id: str = None, + provider: str = None, + service_template_description: str = None, + service_template_doc: str = None, + service_template_id: str = None, + service_template_name: str = None, + user_id: str = None, + ): + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.inference_spec = inference_spec + self.labels = labels + self.order_number = order_number + self.owner_id = owner_id + self.provider = provider + self.service_template_description = service_template_description + self.service_template_doc = service_template_doc + self.service_template_id = service_template_id + self.service_template_name = service_template_name + self.user_id = user_id + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def 
to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.order_number is not None: + result['OrderNumber'] = self.order_number + if self.owner_id is not None: + result['OwnerId'] = self.owner_id + if self.provider is not None: + result['Provider'] = self.provider + if self.service_template_description is not None: + result['ServiceTemplateDescription'] = self.service_template_description + if self.service_template_doc is not None: + result['ServiceTemplateDoc'] = self.service_template_doc + if self.service_template_id is not None: + result['ServiceTemplateId'] = self.service_template_id + if self.service_template_name is not None: + result['ServiceTemplateName'] = self.service_template_name + if self.user_id is not None: + result['UserId'] = self.user_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('InferenceSpec') is not None: + self.inference_spec = m.get('InferenceSpec') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('OrderNumber') is not None: + self.order_number = m.get('OrderNumber') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('ServiceTemplateDescription') is not None: + 
self.service_template_description = m.get('ServiceTemplateDescription') + if m.get('ServiceTemplateDoc') is not None: + self.service_template_doc = m.get('ServiceTemplateDoc') + if m.get('ServiceTemplateId') is not None: + self.service_template_id = m.get('ServiceTemplateId') + if m.get('ServiceTemplateName') is not None: + self.service_template_name = m.get('ServiceTemplateName') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + return self + + +class Relation(TeaModel): + def __init__( + self, + err_msg: str = None, + lineage_relation: LineageRelation = None, + result: bool = None, + ): + self.err_msg = err_msg + self.lineage_relation = lineage_relation + self.result = result + + def validate(self): + if self.lineage_relation: + self.lineage_relation.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.err_msg is not None: + result['ErrMsg'] = self.err_msg + if self.lineage_relation is not None: + result['LineageRelation'] = self.lineage_relation.to_map() + if self.result is not None: + result['Result'] = self.result + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ErrMsg') is not None: + self.err_msg = m.get('ErrMsg') + if m.get('LineageRelation') is not None: + temp_model = LineageRelation() + self.lineage_relation = temp_model.from_map(m['LineageRelation']) + if m.get('Result') is not None: + self.result = m.get('Result') + return self + + +class Relationship(TeaModel): + def __init__( + self, + attributes: Dict[str, Any] = None, + data_channel: str = None, + relationship_guid: str = None, + relationship_type: str = None, + ): + self.attributes = attributes + self.data_channel = data_channel + self.relationship_guid = relationship_guid + self.relationship_type = relationship_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.attributes 
is not None: + result['Attributes'] = self.attributes + if self.data_channel is not None: + result['DataChannel'] = self.data_channel + if self.relationship_guid is not None: + result['RelationshipGuid'] = self.relationship_guid + if self.relationship_type is not None: + result['RelationshipType'] = self.relationship_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Attributes') is not None: + self.attributes = m.get('Attributes') + if m.get('DataChannel') is not None: + self.data_channel = m.get('DataChannel') + if m.get('RelationshipGuid') is not None: + self.relationship_guid = m.get('RelationshipGuid') + if m.get('RelationshipType') is not None: + self.relationship_type = m.get('RelationshipType') + return self + + +class Trial(TeaModel): + def __init__( + self, + accessibility: str = None, + experiment_id: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + labels: List[Dict[str, Any]] = None, + name: str = None, + owner_id: str = None, + source_id: str = None, + source_type: str = None, + trial_id: str = None, + user_id: str = None, + workspace_id: str = None, + ): + self.accessibility = accessibility + self.experiment_id = experiment_id + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.labels = labels + self.name = name + self.owner_id = owner_id + self.source_id = source_id + self.source_type = source_type + self.trial_id = trial_id + self.user_id = user_id + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = 
self.gmt_modified_time + if self.labels is not None: + result['Labels'] = self.labels + if self.name is not None: + result['Name'] = self.name + if self.owner_id is not None: + result['OwnerId'] = self.owner_id + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.trial_id is not None: + result['TrialId'] = self.trial_id + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('ExperimentId') is not None: + self.experiment_id = m.get('ExperimentId') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('TrialId') is not None: + self.trial_id = m.get('TrialId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class TrialLabel(TeaModel): + def __init__( + self, + gmt_create_time: str = None, + gmt_modified_time: str = None, + key: str = None, + trial_id: str = None, + value: str = None, + ): + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.key = key + self.trial_id = trial_id + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = 
super().to_map() + if _map is not None: + return _map + + result = dict() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.key is not None: + result['Key'] = self.key + if self.trial_id is not None: + result['TrialId'] = self.trial_id + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('TrialId') is not None: + self.trial_id = m.get('TrialId') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class AddImageRequestLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class AddImageRequest(TeaModel): + def __init__( + self, + accessibility: str = None, + description: str = None, + image_id: str = None, + image_uri: str = None, + labels: List[AddImageRequestLabels] = None, + name: str = None, + size: int = None, + workspace_id: str = None, + ): + self.accessibility = accessibility + self.description = description + self.image_id = image_id + # This parameter is required. 
+ self.image_uri = image_uri + self.labels = labels + # This parameter is required. + self.name = name + self.size = size + self.workspace_id = workspace_id + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.description is not None: + result['Description'] = self.description + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_uri is not None: + result['ImageUri'] = self.image_uri + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.size is not None: + result['Size'] = self.size + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageUri') is not None: + self.image_uri = m.get('ImageUri') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = AddImageRequestLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Size') is not None: + self.size = m.get('Size') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class AddImageResponseBody(TeaModel): + def __init__( + self, + image_id: str = None, + request_id: str = None, + ): + self.image_id = image_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = 
super().to_map() + if _map is not None: + return _map + + result = dict() + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class AddImageResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: AddImageResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = AddImageResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class AddImageLabelsRequestLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + 
self.value = m.get('Value') + return self + + +class AddImageLabelsRequest(TeaModel): + def __init__( + self, + labels: List[AddImageLabelsRequestLabels] = None, + ): + # This parameter is required. + self.labels = labels + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = AddImageLabelsRequestLabels() + self.labels.append(temp_model.from_map(k)) + return self + + +class AddImageLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class AddImageLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: AddImageLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = 
None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = AddImageLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class AddMemberRoleResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class AddMemberRoleResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: AddMemberRoleResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = AddMemberRoleResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class AddWorkspaceQuotaResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map 
= super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class AddWorkspaceQuotaResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: AddWorkspaceQuotaResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = AddWorkspaceQuotaResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class AssumeServiceIdentityRoleResponseBody(TeaModel): + def __init__( + self, + access_key_id: str = None, + access_key_secret: str = None, + request_id: str = None, + security_token: str = None, + ): + self.access_key_id = access_key_id + self.access_key_secret = access_key_secret + self.request_id = request_id + self.security_token = security_token + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.access_key_id is not None: + result['AccessKeyId'] = self.access_key_id + if self.access_key_secret is not None: + result['AccessKeySecret'] = self.access_key_secret + if 
self.request_id is not None: + result['RequestId'] = self.request_id + if self.security_token is not None: + result['SecurityToken'] = self.security_token + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AccessKeyId') is not None: + self.access_key_id = m.get('AccessKeyId') + if m.get('AccessKeySecret') is not None: + self.access_key_secret = m.get('AccessKeySecret') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('SecurityToken') is not None: + self.security_token = m.get('SecurityToken') + return self + + +class AssumeServiceIdentityRoleResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: AssumeServiceIdentityRoleResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = AssumeServiceIdentityRoleResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ChangeDatasetOwnerRequest(TeaModel): + def __init__( + self, + user_id: str = None, + ): + self.user_id = user_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.user_id is not None: + result['UserId'] = self.user_id + return result + + def from_map(self, m: dict = None): + m = m or 
dict() + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + return self + + +class ChangeDatasetOwnerResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['requestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('requestId') is not None: + self.request_id = m.get('requestId') + return self + + +class ChangeDatasetOwnerResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ChangeDatasetOwnerResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ChangeDatasetOwnerResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateCodeSourceRequest(TeaModel): + def __init__( + self, + accessibility: str = None, + code_branch: str = None, + code_repo: str = None, + code_repo_access_token: str = None, + code_repo_user_name: str = None, + description: str = None, + display_name: str = None, + mount_path: str = None, + workspace_id: str = None, + ): + self.accessibility = accessibility + self.code_branch = 
code_branch + self.code_repo = code_repo + self.code_repo_access_token = code_repo_access_token + self.code_repo_user_name = code_repo_user_name + self.description = description + # This parameter is required. + self.display_name = display_name + self.mount_path = mount_path + # This parameter is required. + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.code_branch is not None: + result['CodeBranch'] = self.code_branch + if self.code_repo is not None: + result['CodeRepo'] = self.code_repo + if self.code_repo_access_token is not None: + result['CodeRepoAccessToken'] = self.code_repo_access_token + if self.code_repo_user_name is not None: + result['CodeRepoUserName'] = self.code_repo_user_name + if self.description is not None: + result['Description'] = self.description + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.mount_path is not None: + result['MountPath'] = self.mount_path + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('CodeBranch') is not None: + self.code_branch = m.get('CodeBranch') + if m.get('CodeRepo') is not None: + self.code_repo = m.get('CodeRepo') + if m.get('CodeRepoAccessToken') is not None: + self.code_repo_access_token = m.get('CodeRepoAccessToken') + if m.get('CodeRepoUserName') is not None: + self.code_repo_user_name = m.get('CodeRepoUserName') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('MountPath') is not None: + self.mount_path = m.get('MountPath') + if 
m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class CreateCodeSourceResponseBody(TeaModel): + def __init__( + self, + code_source_id: str = None, + request_id: str = None, + ): + self.code_source_id = code_source_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code_source_id is not None: + result['CodeSourceId'] = self.code_source_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CodeSourceId') is not None: + self.code_source_id = m.get('CodeSourceId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateCodeSourceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateCodeSourceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateCodeSourceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateCollectionRequest(TeaModel): + def __init__( + self, + collection_name: str = None, + ): + # This parameter is required. 
+ self.collection_name = collection_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.collection_name is not None: + result['CollectionName'] = self.collection_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CollectionName') is not None: + self.collection_name = m.get('CollectionName') + return self + + +class CreateCollectionResponseBody(TeaModel): + def __init__( + self, + collection_name: str = None, + request_id: str = None, + ): + self.collection_name = collection_name + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.collection_name is not None: + result['CollectionName'] = self.collection_name + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CollectionName') is not None: + self.collection_name = m.get('CollectionName') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateCollectionResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateCollectionResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if 
m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateCollectionResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateDatasetRequest(TeaModel): + def __init__( + self, + accessibility: str = None, + data_count: int = None, + data_size: int = None, + data_source_type: str = None, + data_type: str = None, + description: str = None, + labels: List[Label] = None, + name: str = None, + options: str = None, + property: str = None, + provider: str = None, + provider_type: str = None, + source_dataset_id: str = None, + source_dataset_version: str = None, + source_id: str = None, + source_type: str = None, + uri: str = None, + user_id: str = None, + version_description: str = None, + version_labels: List[Label] = None, + workspace_id: str = None, + ): + self.accessibility = accessibility + self.data_count = data_count + self.data_size = data_size + # This parameter is required. + self.data_source_type = data_source_type + self.data_type = data_type + self.description = description + self.labels = labels + # This parameter is required. + self.name = name + self.options = options + # This parameter is required. + self.property = property + self.provider = provider + self.provider_type = provider_type + self.source_dataset_id = source_dataset_id + self.source_dataset_version = source_dataset_version + self.source_id = source_id + self.source_type = source_type + # This parameter is required. 
+ self.uri = uri + self.user_id = user_id + self.version_description = version_description + self.version_labels = version_labels + self.workspace_id = workspace_id + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.version_labels: + for k in self.version_labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.data_count is not None: + result['DataCount'] = self.data_count + if self.data_size is not None: + result['DataSize'] = self.data_size + if self.data_source_type is not None: + result['DataSourceType'] = self.data_source_type + if self.data_type is not None: + result['DataType'] = self.data_type + if self.description is not None: + result['Description'] = self.description + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.options is not None: + result['Options'] = self.options + if self.property is not None: + result['Property'] = self.property + if self.provider is not None: + result['Provider'] = self.provider + if self.provider_type is not None: + result['ProviderType'] = self.provider_type + if self.source_dataset_id is not None: + result['SourceDatasetId'] = self.source_dataset_id + if self.source_dataset_version is not None: + result['SourceDatasetVersion'] = self.source_dataset_version + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.uri is not None: + result['Uri'] = self.uri + if self.user_id is not None: + result['UserId'] = self.user_id + if self.version_description is not None: + result['VersionDescription'] = self.version_description + result['VersionLabels'] = [] + if 
self.version_labels is not None: + for k in self.version_labels: + result['VersionLabels'].append(k.to_map() if k else None) + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('DataCount') is not None: + self.data_count = m.get('DataCount') + if m.get('DataSize') is not None: + self.data_size = m.get('DataSize') + if m.get('DataSourceType') is not None: + self.data_source_type = m.get('DataSourceType') + if m.get('DataType') is not None: + self.data_type = m.get('DataType') + if m.get('Description') is not None: + self.description = m.get('Description') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Options') is not None: + self.options = m.get('Options') + if m.get('Property') is not None: + self.property = m.get('Property') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('ProviderType') is not None: + self.provider_type = m.get('ProviderType') + if m.get('SourceDatasetId') is not None: + self.source_dataset_id = m.get('SourceDatasetId') + if m.get('SourceDatasetVersion') is not None: + self.source_dataset_version = m.get('SourceDatasetVersion') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('Uri') is not None: + self.uri = m.get('Uri') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('VersionDescription') is not None: + self.version_description = m.get('VersionDescription') + self.version_labels = [] + if m.get('VersionLabels') is not None: + for k in m.get('VersionLabels'): + temp_model = Label() + 
self.version_labels.append(temp_model.from_map(k)) + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class CreateDatasetResponseBody(TeaModel): + def __init__( + self, + dataset_id: str = None, + request_id: str = None, + ): + self.dataset_id = dataset_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateDatasetResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateDatasetResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateDatasetResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateDatasetLabelsRequest(TeaModel): + def __init__( + self, + labels: List[Label] = None, + ): + self.labels = labels + + def 
validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + return self + + +class CreateDatasetLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateDatasetLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateDatasetLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = 
CreateDatasetLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateDefaultWorkspaceRequestResources(TeaModel): + def __init__( + self, + product_type: str = None, + resource_type: str = None, + ): + self.product_type = product_type + self.resource_type = resource_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.product_type is not None: + result['ProductType'] = self.product_type + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ProductType') is not None: + self.product_type = m.get('ProductType') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + return self + + +class CreateDefaultWorkspaceRequest(TeaModel): + def __init__( + self, + add_all_ram_users: bool = None, + description: str = None, + env_types: List[str] = None, + resources: List[CreateDefaultWorkspaceRequestResources] = None, + ): + self.add_all_ram_users = add_all_ram_users + # This parameter is required. + self.description = description + # This parameter is required. 
+ self.env_types = env_types + self.resources = resources + + def validate(self): + if self.resources: + for k in self.resources: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.add_all_ram_users is not None: + result['AddAllRamUsers'] = self.add_all_ram_users + if self.description is not None: + result['Description'] = self.description + if self.env_types is not None: + result['EnvTypes'] = self.env_types + result['Resources'] = [] + if self.resources is not None: + for k in self.resources: + result['Resources'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AddAllRamUsers') is not None: + self.add_all_ram_users = m.get('AddAllRamUsers') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('EnvTypes') is not None: + self.env_types = m.get('EnvTypes') + self.resources = [] + if m.get('Resources') is not None: + for k in m.get('Resources'): + temp_model = CreateDefaultWorkspaceRequestResources() + self.resources.append(temp_model.from_map(k)) + return self + + +class CreateDefaultWorkspaceResponseBody(TeaModel): + def __init__( + self, + instance_job_id: str = None, + request_id: str = None, + workspace_id: str = None, + ): + self.instance_job_id = instance_job_id + self.request_id = request_id + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.instance_job_id is not None: + result['InstanceJobId'] = self.instance_job_id + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('InstanceJobId') is not None: + self.instance_job_id = m.get('InstanceJobId') + if 
m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class CreateDefaultWorkspaceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateDefaultWorkspaceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateDefaultWorkspaceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateDingTalkRobotMessageRequest(TeaModel): + def __init__( + self, + access_token: str = None, + message: str = None, + secret: str = None, + ): + # This parameter is required. + self.access_token = access_token + # This parameter is required. 
+ self.message = message + self.secret = secret + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.access_token is not None: + result['AccessToken'] = self.access_token + if self.message is not None: + result['Message'] = self.message + if self.secret is not None: + result['Secret'] = self.secret + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AccessToken') is not None: + self.access_token = m.get('AccessToken') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('Secret') is not None: + self.secret = m.get('Secret') + return self + + +class CreateDingTalkRobotMessageResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateDingTalkRobotMessageResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateDingTalkRobotMessageResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers 
= m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateDingTalkRobotMessageResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateDatasetVersionRequest(TeaModel): + def __init__( + self, + data_count: int = None, + data_size: int = None, + data_source_type: str = None, + description: str = None, + labels: List[Label] = None, + options: str = None, + property: str = None, + source_id: str = None, + source_type: str = None, + uri: str = None, + ): + self.data_count = data_count + self.data_size = data_size + # This parameter is required. + self.data_source_type = data_source_type + self.description = description + self.labels = labels + self.options = options + # This parameter is required. + self.property = property + self.source_id = source_id + self.source_type = source_type + # This parameter is required. + self.uri = uri + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.data_count is not None: + result['DataCount'] = self.data_count + if self.data_size is not None: + result['DataSize'] = self.data_size + if self.data_source_type is not None: + result['DataSourceType'] = self.data_source_type + if self.description is not None: + result['Description'] = self.description + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.options is not None: + result['Options'] = self.options + if self.property is not None: + result['Property'] = self.property + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.uri is not None: + result['Uri'] = self.uri + return result + + def from_map(self, m: dict = None): 
+ m = m or dict() + if m.get('DataCount') is not None: + self.data_count = m.get('DataCount') + if m.get('DataSize') is not None: + self.data_size = m.get('DataSize') + if m.get('DataSourceType') is not None: + self.data_source_type = m.get('DataSourceType') + if m.get('Description') is not None: + self.description = m.get('Description') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Options') is not None: + self.options = m.get('Options') + if m.get('Property') is not None: + self.property = m.get('Property') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('Uri') is not None: + self.uri = m.get('Uri') + return self + + +class CreateDatasetVersionResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + version_name: str = None, + ): + self.request_id = request_id + self.version_name = version_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.version_name is not None: + result['VersionName'] = self.version_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('VersionName') is not None: + self.version_name = m.get('VersionName') + return self + + +class CreateDatasetVersionResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateDatasetVersionResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + 
return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateDatasetVersionResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateDatasetVersionLabelsRequest(TeaModel): + def __init__( + self, + labels: List[Label] = None, + ): + # This parameter is required. + self.labels = labels + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + return self + + +class CreateDatasetVersionLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateDatasetVersionLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, 
+ body: CreateDatasetVersionLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateDatasetVersionLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateExperimentRequest(TeaModel): + def __init__( + self, + accessibility: str = None, + artifact_uri: str = None, + labels: List[LabelInfo] = None, + name: str = None, + workspace_id: str = None, + ): + self.accessibility = accessibility + # OSS storage path of the artifact + # + # This parameter is required. + self.artifact_uri = artifact_uri + # Labels + self.labels = labels + # Name + # + # This parameter is required. + self.name = name + # Workspace ID + # + # This parameter is required. 
+ self.workspace_id = workspace_id + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.artifact_uri is not None: + result['ArtifactUri'] = self.artifact_uri + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('ArtifactUri') is not None: + self.artifact_uri = m.get('ArtifactUri') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = LabelInfo() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class CreateExperimentResponseBody(TeaModel): + def __init__( + self, + experiment_id: str = None, + request_id: str = None, + ): + self.experiment_id = experiment_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ExperimentId') is not None: + self.experiment_id = m.get('ExperimentId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class 
CreateExperimentResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateExperimentResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateExperimentResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateMemberRequestMembers(TeaModel): + def __init__( + self, + roles: List[str] = None, + user_id: str = None, + ): + # This parameter is required. + self.roles = roles + # This parameter is required. + self.user_id = user_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.roles is not None: + result['Roles'] = self.roles + if self.user_id is not None: + result['UserId'] = self.user_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Roles') is not None: + self.roles = m.get('Roles') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + return self + + +class CreateMemberRequest(TeaModel): + def __init__( + self, + members: List[CreateMemberRequestMembers] = None, + ): + # This parameter is required. 
+ self.members = members + + def validate(self): + if self.members: + for k in self.members: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Members'] = [] + if self.members is not None: + for k in self.members: + result['Members'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.members = [] + if m.get('Members') is not None: + for k in m.get('Members'): + temp_model = CreateMemberRequestMembers() + self.members.append(temp_model.from_map(k)) + return self + + +class CreateMemberResponseBodyMembers(TeaModel): + def __init__( + self, + display_name: str = None, + member_id: str = None, + roles: List[str] = None, + user_id: str = None, + ): + self.display_name = display_name + self.member_id = member_id + self.roles = roles + self.user_id = user_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.member_id is not None: + result['MemberId'] = self.member_id + if self.roles is not None: + result['Roles'] = self.roles + if self.user_id is not None: + result['UserId'] = self.user_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('MemberId') is not None: + self.member_id = m.get('MemberId') + if m.get('Roles') is not None: + self.roles = m.get('Roles') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + return self + + +class CreateMemberResponseBody(TeaModel): + def __init__( + self, + members: List[CreateMemberResponseBodyMembers] = None, + request_id: str = None, + ): + self.members = members + self.request_id = request_id + + def validate(self): + if self.members: + for k in self.members: + if k: + 
k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Members'] = [] + if self.members is not None: + for k in self.members: + result['Members'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.members = [] + if m.get('Members') is not None: + for k in m.get('Members'): + temp_model = CreateMemberResponseBodyMembers() + self.members.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateMemberResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateMemberResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateMemberResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateModelRequest(TeaModel): + def __init__( + self, + accessibility: str = None, + domain: str = None, + extra_info: Dict[str, Any] = None, + labels: List[Label] = None, + model_description: str = None, + model_doc: str = None, + model_name: str = None, + model_type: str = None, + order_number: int = None, + origin: str = None, + 
task: str = None, + workspace_id: str = None, + ): + self.accessibility = accessibility + self.domain = domain + self.extra_info = extra_info + self.labels = labels + self.model_description = model_description + self.model_doc = model_doc + # This parameter is required. + self.model_name = model_name + self.model_type = model_type + self.order_number = order_number + self.origin = origin + self.task = task + self.workspace_id = workspace_id + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.domain is not None: + result['Domain'] = self.domain + if self.extra_info is not None: + result['ExtraInfo'] = self.extra_info + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.model_description is not None: + result['ModelDescription'] = self.model_description + if self.model_doc is not None: + result['ModelDoc'] = self.model_doc + if self.model_name is not None: + result['ModelName'] = self.model_name + if self.model_type is not None: + result['ModelType'] = self.model_type + if self.order_number is not None: + result['OrderNumber'] = self.order_number + if self.origin is not None: + result['Origin'] = self.origin + if self.task is not None: + result['Task'] = self.task + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('Domain') is not None: + self.domain = m.get('Domain') + if m.get('ExtraInfo') is not None: + self.extra_info = m.get('ExtraInfo') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + 
self.labels.append(temp_model.from_map(k)) + if m.get('ModelDescription') is not None: + self.model_description = m.get('ModelDescription') + if m.get('ModelDoc') is not None: + self.model_doc = m.get('ModelDoc') + if m.get('ModelName') is not None: + self.model_name = m.get('ModelName') + if m.get('ModelType') is not None: + self.model_type = m.get('ModelType') + if m.get('OrderNumber') is not None: + self.order_number = m.get('OrderNumber') + if m.get('Origin') is not None: + self.origin = m.get('Origin') + if m.get('Task') is not None: + self.task = m.get('Task') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class CreateModelResponseBody(TeaModel): + def __init__( + self, + model_id: str = None, + request_id: str = None, + ): + self.model_id = model_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.model_id is not None: + result['ModelId'] = self.model_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ModelId') is not None: + self.model_id = m.get('ModelId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateModelResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateModelResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return 
result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateModelResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateModelLabelsRequest(TeaModel): + def __init__( + self, + labels: List[Label] = None, + ): + self.labels = labels + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + return self + + +class CreateModelLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateModelLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateModelLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is 
not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateModelLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateModelReleaseRequest(TeaModel): + def __init__( + self, + collections: str = None, + target_model_origin: str = None, + target_model_provider: str = None, + ): + self.collections = collections + self.target_model_origin = target_model_origin + self.target_model_provider = target_model_provider + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.collections is not None: + result['Collections'] = self.collections + if self.target_model_origin is not None: + result['TargetModelOrigin'] = self.target_model_origin + if self.target_model_provider is not None: + result['TargetModelProvider'] = self.target_model_provider + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Collections') is not None: + self.collections = m.get('Collections') + if m.get('TargetModelOrigin') is not None: + self.target_model_origin = m.get('TargetModelOrigin') + if m.get('TargetModelProvider') is not None: + self.target_model_provider = m.get('TargetModelProvider') + return self + + +class CreateModelReleaseResponseBody(TeaModel): + def __init__( + self, + model_id: str = None, + request_id: str = None, + ): + self.model_id = model_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if 
self.model_id is not None: + result['ModelId'] = self.model_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ModelId') is not None: + self.model_id = m.get('ModelId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateModelReleaseResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateModelReleaseResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateModelReleaseResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateModelVersionRequest(TeaModel): + def __init__( + self, + approval_status: str = None, + compression_spec: Dict[str, Any] = None, + evaluation_spec: Dict[str, Any] = None, + extra_info: Dict[str, Any] = None, + format_type: str = None, + framework_type: str = None, + inference_spec: Dict[str, Any] = None, + labels: List[Label] = None, + metrics: Dict[str, Any] = None, + options: str = None, + source_id: str = None, + source_type: str = None, + training_spec: Dict[str, Any] = None, + uri: str = None, + version_description: str = None, + version_name: str = None, + ): + self.approval_status = 
approval_status + self.compression_spec = compression_spec + self.evaluation_spec = evaluation_spec + self.extra_info = extra_info + self.format_type = format_type + self.framework_type = framework_type + self.inference_spec = inference_spec + self.labels = labels + self.metrics = metrics + self.options = options + self.source_id = source_id + self.source_type = source_type + self.training_spec = training_spec + # This parameter is required. + self.uri = uri + self.version_description = version_description + self.version_name = version_name + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.approval_status is not None: + result['ApprovalStatus'] = self.approval_status + if self.compression_spec is not None: + result['CompressionSpec'] = self.compression_spec + if self.evaluation_spec is not None: + result['EvaluationSpec'] = self.evaluation_spec + if self.extra_info is not None: + result['ExtraInfo'] = self.extra_info + if self.format_type is not None: + result['FormatType'] = self.format_type + if self.framework_type is not None: + result['FrameworkType'] = self.framework_type + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.metrics is not None: + result['Metrics'] = self.metrics + if self.options is not None: + result['Options'] = self.options + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.training_spec is not None: + result['TrainingSpec'] = self.training_spec + if self.uri is not None: + result['Uri'] = self.uri + if self.version_description is not None: + result['VersionDescription'] = self.version_description + if 
self.version_name is not None: + result['VersionName'] = self.version_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ApprovalStatus') is not None: + self.approval_status = m.get('ApprovalStatus') + if m.get('CompressionSpec') is not None: + self.compression_spec = m.get('CompressionSpec') + if m.get('EvaluationSpec') is not None: + self.evaluation_spec = m.get('EvaluationSpec') + if m.get('ExtraInfo') is not None: + self.extra_info = m.get('ExtraInfo') + if m.get('FormatType') is not None: + self.format_type = m.get('FormatType') + if m.get('FrameworkType') is not None: + self.framework_type = m.get('FrameworkType') + if m.get('InferenceSpec') is not None: + self.inference_spec = m.get('InferenceSpec') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Metrics') is not None: + self.metrics = m.get('Metrics') + if m.get('Options') is not None: + self.options = m.get('Options') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('TrainingSpec') is not None: + self.training_spec = m.get('TrainingSpec') + if m.get('Uri') is not None: + self.uri = m.get('Uri') + if m.get('VersionDescription') is not None: + self.version_description = m.get('VersionDescription') + if m.get('VersionName') is not None: + self.version_name = m.get('VersionName') + return self + + +class CreateModelVersionResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + version_name: str = None, + ): + self.request_id = request_id + self.version_name = version_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.version_name is not None: + 
result['VersionName'] = self.version_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('VersionName') is not None: + self.version_name = m.get('VersionName') + return self + + +class CreateModelVersionResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateModelVersionResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateModelVersionResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateModelVersionLabelsRequest(TeaModel): + def __init__( + self, + labels: List[Label] = None, + ): + self.labels = labels + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + return self + + +class 
CreateModelVersionLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateModelVersionLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateModelVersionLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateModelVersionLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateModelVersionReleaseRequest(TeaModel): + def __init__( + self, + target_model_origin: str = None, + target_model_provider: str = None, + ): + self.target_model_origin = target_model_origin + self.target_model_provider = target_model_provider + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.target_model_origin is not None: + result['TargetModelOrigin'] = 
self.target_model_origin + if self.target_model_provider is not None: + result['TargetModelProvider'] = self.target_model_provider + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('TargetModelOrigin') is not None: + self.target_model_origin = m.get('TargetModelOrigin') + if m.get('TargetModelProvider') is not None: + self.target_model_provider = m.get('TargetModelProvider') + return self + + +class CreateModelVersionReleaseResponseBody(TeaModel): + def __init__( + self, + model_id: str = None, + request_id: str = None, + version_name: str = None, + ): + self.model_id = model_id + self.request_id = request_id + self.version_name = version_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.model_id is not None: + result['ModelId'] = self.model_id + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.version_name is not None: + result['VersionName'] = self.version_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ModelId') is not None: + self.model_id = m.get('ModelId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('VersionName') is not None: + self.version_name = m.get('VersionName') + return self + + +class CreateModelVersionReleaseResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateModelVersionReleaseResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = 
self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateModelVersionReleaseResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateProductOrdersRequestProductsInstanceProperties(TeaModel): + def __init__( + self, + code: str = None, + name: str = None, + value: str = None, + ): + self.code = code + self.name = name + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.name is not None: + result['Name'] = self.name + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class CreateProductOrdersRequestProducts(TeaModel): + def __init__( + self, + auto_renew: bool = None, + charge_type: str = None, + duration: int = None, + instance_properties: List[CreateProductOrdersRequestProductsInstanceProperties] = None, + order_type: str = None, + pricing_cycle: str = None, + product_code: str = None, + ): + self.auto_renew = auto_renew + self.charge_type = charge_type + self.duration = duration + self.instance_properties = instance_properties + self.order_type = order_type + self.pricing_cycle = pricing_cycle + self.product_code = product_code + + def validate(self): + if self.instance_properties: + for k in self.instance_properties: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if 
self.auto_renew is not None: + result['AutoRenew'] = self.auto_renew + if self.charge_type is not None: + result['ChargeType'] = self.charge_type + if self.duration is not None: + result['Duration'] = self.duration + result['InstanceProperties'] = [] + if self.instance_properties is not None: + for k in self.instance_properties: + result['InstanceProperties'].append(k.to_map() if k else None) + if self.order_type is not None: + result['OrderType'] = self.order_type + if self.pricing_cycle is not None: + result['PricingCycle'] = self.pricing_cycle + if self.product_code is not None: + result['ProductCode'] = self.product_code + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AutoRenew') is not None: + self.auto_renew = m.get('AutoRenew') + if m.get('ChargeType') is not None: + self.charge_type = m.get('ChargeType') + if m.get('Duration') is not None: + self.duration = m.get('Duration') + self.instance_properties = [] + if m.get('InstanceProperties') is not None: + for k in m.get('InstanceProperties'): + temp_model = CreateProductOrdersRequestProductsInstanceProperties() + self.instance_properties.append(temp_model.from_map(k)) + if m.get('OrderType') is not None: + self.order_type = m.get('OrderType') + if m.get('PricingCycle') is not None: + self.pricing_cycle = m.get('PricingCycle') + if m.get('ProductCode') is not None: + self.product_code = m.get('ProductCode') + return self + + +class CreateProductOrdersRequest(TeaModel): + def __init__( + self, + auto_pay: bool = None, + products: List[CreateProductOrdersRequestProducts] = None, + ): + self.auto_pay = auto_pay + self.products = products + + def validate(self): + if self.products: + for k in self.products: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.auto_pay is not None: + result['AutoPay'] = self.auto_pay + result['Products'] = [] + if self.products is not None: + for k in 
self.products: + result['Products'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AutoPay') is not None: + self.auto_pay = m.get('AutoPay') + self.products = [] + if m.get('Products') is not None: + for k in m.get('Products'): + temp_model = CreateProductOrdersRequestProducts() + self.products.append(temp_model.from_map(k)) + return self + + +class CreateProductOrdersResponseBody(TeaModel): + def __init__( + self, + buy_product_request_id: str = None, + message: str = None, + order_id: str = None, + request_id: str = None, + ): + self.buy_product_request_id = buy_product_request_id + self.message = message + self.order_id = order_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.buy_product_request_id is not None: + result['BuyProductRequestId'] = self.buy_product_request_id + if self.message is not None: + result['Message'] = self.message + if self.order_id is not None: + result['OrderId'] = self.order_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('BuyProductRequestId') is not None: + self.buy_product_request_id = m.get('BuyProductRequestId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('OrderId') is not None: + self.order_id = m.get('OrderId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateProductOrdersResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateProductOrdersResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is 
not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateProductOrdersResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateServiceIdentityRoleRequest(TeaModel): + def __init__( + self, + role_name: str = None, + ): + # This parameter is required. + self.role_name = role_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.role_name is not None: + result['RoleName'] = self.role_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RoleName') is not None: + self.role_name = m.get('RoleName') + return self + + +class CreateServiceIdentityRoleResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateServiceIdentityRoleResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateServiceIdentityRoleResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + 
self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateServiceIdentityRoleResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateServiceTemplateRequest(TeaModel): + def __init__( + self, + inference_spec: Dict[str, Any] = None, + labels: List[Label] = None, + order_number: int = None, + provider: str = None, + service_template_description: str = None, + service_template_doc: str = None, + service_template_name: str = None, + ): + self.inference_spec = inference_spec + self.labels = labels + self.order_number = order_number + self.provider = provider + self.service_template_description = service_template_description + self.service_template_doc = service_template_doc + # This parameter is required. 
+ self.service_template_name = service_template_name + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.order_number is not None: + result['OrderNumber'] = self.order_number + if self.provider is not None: + result['Provider'] = self.provider + if self.service_template_description is not None: + result['ServiceTemplateDescription'] = self.service_template_description + if self.service_template_doc is not None: + result['ServiceTemplateDoc'] = self.service_template_doc + if self.service_template_name is not None: + result['ServiceTemplateName'] = self.service_template_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('InferenceSpec') is not None: + self.inference_spec = m.get('InferenceSpec') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('OrderNumber') is not None: + self.order_number = m.get('OrderNumber') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('ServiceTemplateDescription') is not None: + self.service_template_description = m.get('ServiceTemplateDescription') + if m.get('ServiceTemplateDoc') is not None: + self.service_template_doc = m.get('ServiceTemplateDoc') + if m.get('ServiceTemplateName') is not None: + self.service_template_name = m.get('ServiceTemplateName') + return self + + +class CreateServiceTemplateResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + service_template_id: str = None, + ): + self.request_id = request_id + self.service_template_id = service_template_id + + 
def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.service_template_id is not None: + result['ServiceTemplateId'] = self.service_template_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('ServiceTemplateId') is not None: + self.service_template_id = m.get('ServiceTemplateId') + return self + + +class CreateServiceTemplateResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateServiceTemplateResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateServiceTemplateResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateServiceTemplateLabelsRequest(TeaModel): + def __init__( + self, + labels: List[Label] = None, + ): + self.labels = labels + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + 
result['Labels'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + return self + + +class CreateServiceTemplateLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class CreateServiceTemplateLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateServiceTemplateLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateServiceTemplateLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateTrialRequest(TeaModel): + def __init__( + self, + experiment_id: str = None, + labels: List[LabelInfo] = None, + name: str = None, + source_id: 
str = None, + source_type: str = None, + ): + # This parameter is required. + self.experiment_id = experiment_id + self.labels = labels + self.name = name + self.source_id = source_id + # This parameter is required. + self.source_type = source_type + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ExperimentId') is not None: + self.experiment_id = m.get('ExperimentId') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = LabelInfo() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + return self + + +class CreateTrialResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + trial_id: str = None, + ): + self.request_id = request_id + self.trial_id = trial_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.trial_id is not None: + result['TrialId'] = self.trial_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = 
m.get('RequestId') + if m.get('TrialId') is not None: + self.trial_id = m.get('TrialId') + return self + + +class CreateTrialResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateTrialResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateTrialResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateUserResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + user_id: str = None, + ): + self.request_id = request_id + self.user_id = user_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.user_id is not None: + result['UserId'] = self.user_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + return self + + +class CreateUserResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateUserResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + 
self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateUserResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateWorkspaceRequest(TeaModel): + def __init__( + self, + description: str = None, + display_name: str = None, + env_types: List[str] = None, + workspace_name: str = None, + ): + # This parameter is required. + self.description = description + self.display_name = display_name + # This parameter is required. + self.env_types = env_types + # This parameter is required. 
+ self.workspace_name = workspace_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.description is not None: + result['Description'] = self.description + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.env_types is not None: + result['EnvTypes'] = self.env_types + if self.workspace_name is not None: + result['WorkspaceName'] = self.workspace_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('EnvTypes') is not None: + self.env_types = m.get('EnvTypes') + if m.get('WorkspaceName') is not None: + self.workspace_name = m.get('WorkspaceName') + return self + + +class CreateWorkspaceResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + workspace_id: str = None, + ): + self.request_id = request_id + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class CreateWorkspaceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateWorkspaceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = 
super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateWorkspaceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateWorkspaceResourceRequestResourcesLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class CreateWorkspaceResourceRequestResourcesQuotas(TeaModel): + def __init__( + self, + id: str = None, + ): + # This parameter is required. 
+ self.id = id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.id is not None: + result['Id'] = self.id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Id') is not None: + self.id = m.get('Id') + return self + + +class CreateWorkspaceResourceRequestResources(TeaModel): + def __init__( + self, + env_type: str = None, + group_name: str = None, + is_default: bool = None, + labels: List[CreateWorkspaceResourceRequestResourcesLabels] = None, + name: str = None, + product_type: str = None, + quotas: List[CreateWorkspaceResourceRequestResourcesQuotas] = None, + resource_type: str = None, + spec: Dict[str, Any] = None, + workspace_id: str = None, + ): + # This parameter is required. + self.env_type = env_type + self.group_name = group_name + self.is_default = is_default + self.labels = labels + # This parameter is required. + self.name = name + self.product_type = product_type + self.quotas = quotas + self.resource_type = resource_type + self.spec = spec + # This parameter is required. 
+ self.workspace_id = workspace_id + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.quotas: + for k in self.quotas: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.env_type is not None: + result['EnvType'] = self.env_type + if self.group_name is not None: + result['GroupName'] = self.group_name + if self.is_default is not None: + result['IsDefault'] = self.is_default + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.product_type is not None: + result['ProductType'] = self.product_type + result['Quotas'] = [] + if self.quotas is not None: + for k in self.quotas: + result['Quotas'].append(k.to_map() if k else None) + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + if self.spec is not None: + result['Spec'] = self.spec + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('EnvType') is not None: + self.env_type = m.get('EnvType') + if m.get('GroupName') is not None: + self.group_name = m.get('GroupName') + if m.get('IsDefault') is not None: + self.is_default = m.get('IsDefault') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = CreateWorkspaceResourceRequestResourcesLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('ProductType') is not None: + self.product_type = m.get('ProductType') + self.quotas = [] + if m.get('Quotas') is not None: + for k in m.get('Quotas'): + temp_model = CreateWorkspaceResourceRequestResourcesQuotas() + self.quotas.append(temp_model.from_map(k)) + if m.get('ResourceType') is not None: + 
self.resource_type = m.get('ResourceType') + if m.get('Spec') is not None: + self.spec = m.get('Spec') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class CreateWorkspaceResourceRequest(TeaModel): + def __init__( + self, + option: str = None, + resources: List[CreateWorkspaceResourceRequestResources] = None, + ): + self.option = option + # This parameter is required. + self.resources = resources + + def validate(self): + if self.resources: + for k in self.resources: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.option is not None: + result['Option'] = self.option + result['Resources'] = [] + if self.resources is not None: + for k in self.resources: + result['Resources'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Option') is not None: + self.option = m.get('Option') + self.resources = [] + if m.get('Resources') is not None: + for k in m.get('Resources'): + temp_model = CreateWorkspaceResourceRequestResources() + self.resources.append(temp_model.from_map(k)) + return self + + +class CreateWorkspaceResourceResponseBodyResources(TeaModel): + def __init__( + self, + id: str = None, + ): + self.id = id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.id is not None: + result['Id'] = self.id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Id') is not None: + self.id = m.get('Id') + return self + + +class CreateWorkspaceResourceResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + resources: List[CreateWorkspaceResourceResponseBodyResources] = None, + total_count: int = None, + ): + self.request_id = request_id + self.resources = resources + self.total_count = total_count + + def validate(self): + 
if self.resources: + for k in self.resources: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + result['Resources'] = [] + if self.resources is not None: + for k in self.resources: + result['Resources'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + self.resources = [] + if m.get('Resources') is not None: + for k in m.get('Resources'): + temp_model = CreateWorkspaceResourceResponseBodyResources() + self.resources.append(temp_model.from_map(k)) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + return self + + +class CreateWorkspaceResourceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateWorkspaceResourceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateWorkspaceResourceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteCodeSourceResponseBody(TeaModel): + def __init__( + 
self, + code_source_id: str = None, + request_id: str = None, + ): + self.code_source_id = code_source_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code_source_id is not None: + result['CodeSourceId'] = self.code_source_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CodeSourceId') is not None: + self.code_source_id = m.get('CodeSourceId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteCodeSourceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteCodeSourceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteCodeSourceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteCollectionResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + 
result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteCollectionResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteCollectionResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteCollectionResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteConfigRequest(TeaModel): + def __init__( + self, + labels: str = None, + ): + self.labels = labels + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.labels is not None: + result['Labels'] = self.labels + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Labels') is not None: + self.labels = m.get('Labels') + return self + + +class DeleteConfigResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = 
self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteConfigResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteConfigResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteConfigResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteDatasetResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteDatasetResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteDatasetResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def 
to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteDatasetResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteDatasetLabelsRequest(TeaModel): + def __init__( + self, + label_keys: str = None, + ): + self.label_keys = label_keys + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.label_keys is not None: + result['LabelKeys'] = self.label_keys + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('LabelKeys') is not None: + self.label_keys = m.get('LabelKeys') + return self + + +class DeleteDatasetLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteDatasetLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteDatasetLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + 
self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteDatasetLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteDatasetVersionResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteDatasetVersionResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteDatasetVersionResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') 
+ if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteDatasetVersionResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteDatasetVersionLabelsRequest(TeaModel): + def __init__( + self, + keys: str = None, + ): + # This parameter is required. + self.keys = keys + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.keys is not None: + result['Keys'] = self.keys + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Keys') is not None: + self.keys = m.get('Keys') + return self + + +class DeleteDatasetVersionLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteDatasetVersionLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteDatasetVersionLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not 
None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteDatasetVersionLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteExperimentResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteExperimentResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteExperimentResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteExperimentResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteExperimentLabelResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if 
_map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteExperimentLabelResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteExperimentLabelResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteExperimentLabelResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteMembersRequest(TeaModel): + def __init__( + self, + member_ids: str = None, + ): + # This parameter is required. 
+ self.member_ids = member_ids + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.member_ids is not None: + result['MemberIds'] = self.member_ids + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('MemberIds') is not None: + self.member_ids = m.get('MemberIds') + return self + + +class DeleteMembersResponseBody(TeaModel): + def __init__( + self, + code: str = None, + message: str = None, + request_id: str = None, + ): + self.code = code + self.message = message + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteMembersResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteMembersResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + 
self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteMembersResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteModelResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteModelResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteModelResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteModelResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteModelDomainRequest(TeaModel): + def __init__( + self, + model_task_ids: str = None, + ): + self.model_task_ids = model_task_ids + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + 
result = dict() + if self.model_task_ids is not None: + result['ModelTaskIds'] = self.model_task_ids + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ModelTaskIds') is not None: + self.model_task_ids = m.get('ModelTaskIds') + return self + + +class DeleteModelDomainResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteModelDomainResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteModelDomainResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteModelDomainResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteModelLabelsRequest(TeaModel): + def __init__( + self, + keys: str = None, + label_keys: str = None, + ): + self.keys = keys + self.label_keys = label_keys + + def validate(self): + pass + + def 
to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.keys is not None: + result['Keys'] = self.keys + if self.label_keys is not None: + result['LabelKeys'] = self.label_keys + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Keys') is not None: + self.keys = m.get('Keys') + if m.get('LabelKeys') is not None: + self.label_keys = m.get('LabelKeys') + return self + + +class DeleteModelLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteModelLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteModelLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteModelLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class 
DeleteModelVersionResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteModelVersionResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteModelVersionResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteModelVersionResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteModelVersionLabelsRequest(TeaModel): + def __init__( + self, + keys: str = None, + label_keys: str = None, + ): + self.keys = keys + self.label_keys = label_keys + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.keys is not None: + result['Keys'] = self.keys + if self.label_keys is not None: + result['LabelKeys'] = self.label_keys + return result + + def from_map(self, m: dict = 
None): + m = m or dict() + if m.get('Keys') is not None: + self.keys = m.get('Keys') + if m.get('LabelKeys') is not None: + self.label_keys = m.get('LabelKeys') + return self + + +class DeleteModelVersionLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteModelVersionLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteModelVersionLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteModelVersionLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteServiceTemplateResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if 
self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteServiceTemplateResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteServiceTemplateResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteServiceTemplateResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteServiceTemplateLabelsRequest(TeaModel): + def __init__( + self, + label_keys: str = None, + ): + self.label_keys = label_keys + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.label_keys is not None: + result['LabelKeys'] = self.label_keys + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('LabelKeys') is not None: + self.label_keys = m.get('LabelKeys') + return self + + +class DeleteServiceTemplateLabelsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is 
not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteServiceTemplateLabelsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteServiceTemplateLabelsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteServiceTemplateLabelsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteUserConfigRequest(TeaModel): + def __init__( + self, + config_key: str = None, + ): + self.config_key = config_key + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.config_key is not None: + result['ConfigKey'] = self.config_key + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ConfigKey') is not None: + self.config_key = m.get('ConfigKey') + return self + + +class DeleteUserConfigResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def 
to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteUserConfigResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteUserConfigResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteUserConfigResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteWorkspaceResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteWorkspaceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: 
DeleteWorkspaceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteWorkspaceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteWorkspaceResourceRequest(TeaModel): + def __init__( self, - key: str = None, + group_name: str = None, + labels: str = None, + option: str = None, + product_type: str = None, + resource_ids: str = None, + resource_type: str = None, + ): + self.group_name = group_name + self.labels = labels + self.option = option + self.product_type = product_type + self.resource_ids = resource_ids + self.resource_type = resource_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.group_name is not None: + result['GroupName'] = self.group_name + if self.labels is not None: + result['Labels'] = self.labels + if self.option is not None: + result['Option'] = self.option + if self.product_type is not None: + result['ProductType'] = self.product_type + if self.resource_ids is not None: + result['ResourceIds'] = self.resource_ids + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('GroupName') is not None: + self.group_name 
= m.get('GroupName') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('Option') is not None: + self.option = m.get('Option') + if m.get('ProductType') is not None: + self.product_type = m.get('ProductType') + if m.get('ResourceIds') is not None: + self.resource_ids = m.get('ResourceIds') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + return self + + +class DeleteWorkspaceResourceResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + resource_ids: List[str] = None, + ): + self.request_id = request_id + self.resource_ids = resource_ids + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.resource_ids is not None: + result['ResourceIds'] = self.resource_ids + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('ResourceIds') is not None: + self.resource_ids = m.get('ResourceIds') + return self + + +class DeleteWorkspaceResourceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteWorkspaceResourceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + 
self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteWorkspaceResourceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteWorkspaceRolesRequest(TeaModel): + def __init__( + self, + role_ids: str = None, + ): + self.role_ids = role_ids + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.role_ids is not None: + result['RoleIds'] = self.role_ids + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RoleIds') is not None: + self.role_ids = m.get('RoleIds') + return self + + +class DeleteWorkspaceRolesResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DeleteWorkspaceRolesResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteWorkspaceRolesResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if 
m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteWorkspaceRolesResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DescribePricingModuleRequest(TeaModel): + def __init__( + self, + product_code: str = None, + product_type: str = None, + subscription_type: str = None, + ): + # This parameter is required. + self.product_code = product_code + # This parameter is required. + self.product_type = product_type + # This parameter is required. + self.subscription_type = subscription_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.product_code is not None: + result['ProductCode'] = self.product_code + if self.product_type is not None: + result['ProductType'] = self.product_type + if self.subscription_type is not None: + result['SubscriptionType'] = self.subscription_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ProductCode') is not None: + self.product_code = m.get('ProductCode') + if m.get('ProductType') is not None: + self.product_type = m.get('ProductType') + if m.get('SubscriptionType') is not None: + self.subscription_type = m.get('SubscriptionType') + return self + + +class DescribePricingModuleResponseBodyAttributeListValues(TeaModel): + def __init__( + self, + name: str = None, + remark: str = None, + type: str = None, value: str = None, ): - self.key = key + self.name = name + self.remark = remark + self.type = type self.value = value def validate(self): @@ -791,31 +7395,45 @@ def to_map(self): return _map result = dict() - if self.key is not None: - result['Key'] = self.key + if self.name is not None: + result['Name'] = self.name + if self.remark is not None: + result['Remark'] = self.remark + if self.type is not None: + result['Type'] = self.type if self.value is not None: result['Value'] = self.value return 
result def from_map(self, m: dict = None): m = m or dict() - if m.get('Key') is not None: - self.key = m.get('Key') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Remark') is not None: + self.remark = m.get('Remark') + if m.get('Type') is not None: + self.type = m.get('Type') if m.get('Value') is not None: self.value = m.get('Value') return self -class AddImageLabelsRequest(TeaModel): +class DescribePricingModuleResponseBodyAttributeList(TeaModel): def __init__( self, - labels: List[AddImageLabelsRequestLabels] = None, + code: str = None, + name: str = None, + unit: str = None, + values: List[DescribePricingModuleResponseBodyAttributeListValues] = None, ): - self.labels = labels + self.code = code + self.name = name + self.unit = unit + self.values = values def validate(self): - if self.labels: - for k in self.labels: + if self.values: + for k in self.values: if k: k.validate() @@ -825,28 +7443,216 @@ def to_map(self): return _map result = dict() - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) + if self.code is not None: + result['Code'] = self.code + if self.name is not None: + result['Name'] = self.name + if self.unit is not None: + result['Unit'] = self.unit + result['Values'] = [] + if self.values is not None: + for k in self.values: + result['Values'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = AddImageLabelsRequestLabels() - self.labels.append(temp_model.from_map(k)) + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Unit') is not None: + self.unit = m.get('Unit') + self.values = [] + if m.get('Values') is not None: + for k in m.get('Values'): + temp_model = DescribePricingModuleResponseBodyAttributeListValues() + 
self.values.append(temp_model.from_map(k)) return self -class AddImageLabelsResponseBody(TeaModel): +class DescribePricingModuleResponseBodyModuleList(TeaModel): + def __init__( + self, + config_list: List[str] = None, + currency: str = None, + module_code: str = None, + module_name: str = None, + price_type: str = None, + ): + self.config_list = config_list + self.currency = currency + self.module_code = module_code + self.module_name = module_name + self.price_type = price_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.config_list is not None: + result['ConfigList'] = self.config_list + if self.currency is not None: + result['Currency'] = self.currency + if self.module_code is not None: + result['ModuleCode'] = self.module_code + if self.module_name is not None: + result['ModuleName'] = self.module_name + if self.price_type is not None: + result['PriceType'] = self.price_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ConfigList') is not None: + self.config_list = m.get('ConfigList') + if m.get('Currency') is not None: + self.currency = m.get('Currency') + if m.get('ModuleCode') is not None: + self.module_code = m.get('ModuleCode') + if m.get('ModuleName') is not None: + self.module_name = m.get('ModuleName') + if m.get('PriceType') is not None: + self.price_type = m.get('PriceType') + return self + + +class DescribePricingModuleResponseBody(TeaModel): + def __init__( + self, + attribute_list: List[DescribePricingModuleResponseBodyAttributeList] = None, + module_list: List[DescribePricingModuleResponseBodyModuleList] = None, + request_id: str = None, + ): + self.attribute_list = attribute_list + self.module_list = module_list + self.request_id = request_id + + def validate(self): + if self.attribute_list: + for k in self.attribute_list: + if k: + k.validate() + if self.module_list: + for k in self.module_list: + if k: 
+ k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['AttributeList'] = [] + if self.attribute_list is not None: + for k in self.attribute_list: + result['AttributeList'].append(k.to_map() if k else None) + result['ModuleList'] = [] + if self.module_list is not None: + for k in self.module_list: + result['ModuleList'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.attribute_list = [] + if m.get('AttributeList') is not None: + for k in m.get('AttributeList'): + temp_model = DescribePricingModuleResponseBodyAttributeList() + self.attribute_list.append(temp_model.from_map(k)) + self.module_list = [] + if m.get('ModuleList') is not None: + for k in m.get('ModuleList'): + temp_model = DescribePricingModuleResponseBodyModuleList() + self.module_list.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class DescribePricingModuleResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DescribePricingModuleResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + 
temp_model = DescribePricingModuleResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetCodeSourceResponseBody(TeaModel): def __init__( self, + accessibility: str = None, + code_branch: str = None, + code_commit: str = None, + code_repo: str = None, + code_repo_access_token: str = None, + code_repo_user_name: str = None, + code_source_id: str = None, + description: str = None, + display_name: str = None, + gmt_create_time: str = None, + gmt_modify_time: str = None, + mount_path: str = None, request_id: str = None, + user_id: str = None, + workspace_id: str = None, ): + self.accessibility = accessibility + self.code_branch = code_branch + self.code_commit = code_commit + self.code_repo = code_repo + self.code_repo_access_token = code_repo_access_token + self.code_repo_user_name = code_repo_user_name + self.code_source_id = code_source_id + self.description = description + self.display_name = display_name + self.gmt_create_time = gmt_create_time + self.gmt_modify_time = gmt_modify_time + self.mount_path = mount_path self.request_id = request_id + self.user_id = user_id + self.workspace_id = workspace_id def validate(self): pass @@ -857,32 +7663,85 @@ def to_map(self): return _map result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.code_branch is not None: + result['CodeBranch'] = self.code_branch + if self.code_commit is not None: + result['CodeCommit'] = self.code_commit + if self.code_repo is not None: + result['CodeRepo'] = self.code_repo + if self.code_repo_access_token is not None: + result['CodeRepoAccessToken'] = self.code_repo_access_token + if self.code_repo_user_name is not None: + result['CodeRepoUserName'] = self.code_repo_user_name + if self.code_source_id is not None: + result['CodeSourceId'] = self.code_source_id + if self.description is not None: + result['Description'] = self.description + if self.display_name is not None: + result['DisplayName'] = 
self.display_name + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modify_time is not None: + result['GmtModifyTime'] = self.gmt_modify_time + if self.mount_path is not None: + result['MountPath'] = self.mount_path if self.request_id is not None: result['RequestId'] = self.request_id + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('CodeBranch') is not None: + self.code_branch = m.get('CodeBranch') + if m.get('CodeCommit') is not None: + self.code_commit = m.get('CodeCommit') + if m.get('CodeRepo') is not None: + self.code_repo = m.get('CodeRepo') + if m.get('CodeRepoAccessToken') is not None: + self.code_repo_access_token = m.get('CodeRepoAccessToken') + if m.get('CodeRepoUserName') is not None: + self.code_repo_user_name = m.get('CodeRepoUserName') + if m.get('CodeSourceId') is not None: + self.code_source_id = m.get('CodeSourceId') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifyTime') is not None: + self.gmt_modify_time = m.get('GmtModifyTime') + if m.get('MountPath') is not None: + self.mount_path = m.get('MountPath') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class AddImageLabelsResponse(TeaModel): +class GetCodeSourceResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: 
AddImageLabelsResponseBody = None, + body: GetCodeSourceResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -907,16 +7766,45 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = AddImageLabelsResponseBody() + temp_model = GetCodeSourceResponseBody() self.body = temp_model.from_map(m['body']) return self -class AddMemberRoleResponseBody(TeaModel): +class GetCodeSourcesStatisticsRequest(TeaModel): + def __init__( + self, + workspace_id: str = None, + ): + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class GetCodeSourcesStatisticsResponseBody(TeaModel): def __init__( self, + count: int = None, request_id: str = None, ): + self.count = count self.request_id = request_id def validate(self): @@ -928,32 +7816,33 @@ def to_map(self): return _map result = dict() + if self.count is not None: + result['Count'] = self.count if self.request_id is not None: - result['RequestId'] = self.request_id + result['requestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('Count') is not None: + self.count = m.get('Count') + if m.get('requestId') is not None: + self.request_id = m.get('requestId') return self -class 
AddMemberRoleResponse(TeaModel): +class GetCodeSourcesStatisticsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: AddMemberRoleResponseBody = None, + body: GetCodeSourcesStatisticsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -978,17 +7867,27 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = AddMemberRoleResponseBody() + temp_model = GetCodeSourcesStatisticsResponseBody() self.body = temp_model.from_map(m['body']) return self -class AddWorkspaceQuotaResponseBody(TeaModel): +class GetCollectionResponseBody(TeaModel): def __init__( self, + collection_name: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + owner_id: str = None, request_id: str = None, + user_id: str = None, ): + self.collection_name = collection_name + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.owner_id = owner_id self.request_id = request_id + self.user_id = user_id def validate(self): pass @@ -999,32 +7898,49 @@ def to_map(self): return _map result = dict() + if self.collection_name is not None: + result['CollectionName'] = self.collection_name + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.owner_id is not None: + result['OwnerId'] = self.owner_id if self.request_id is not None: result['RequestId'] = self.request_id + if self.user_id is not None: + result['UserId'] = self.user_id return result def from_map(self, m: dict = None): m = m or dict() + if 
m.get('CollectionName') is not None: + self.collection_name = m.get('CollectionName') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') return self -class AddWorkspaceQuotaResponse(TeaModel): +class GetCollectionResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: AddWorkspaceQuotaResponseBody = None, + body: GetCollectionResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -1049,26 +7965,71 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = AddWorkspaceQuotaResponseBody() + temp_model = GetCollectionResponseBody() self.body = temp_model.from_map(m['body']) return self -class AssumeServiceIdentityRoleResponseBody(TeaModel): +class GetDatasetResponseBody(TeaModel): def __init__( self, - access_key_id: str = None, - access_key_secret: str = None, + accessibility: str = None, + data_source_type: str = None, + data_type: str = None, + dataset_id: str = None, + description: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + labels: List[Label] = None, + latest_version: DatasetVersion = None, + name: str = None, + options: str = None, + owner_id: str = None, + property: str = None, + provider: str = None, + provider_type: str = None, request_id: str = None, - security_token: str = None, + 
source_dataset_id: str = None, + source_dataset_version: str = None, + source_id: str = None, + source_type: str = None, + tag_template_type: str = None, + uri: str = None, + user_id: str = None, + workspace_id: str = None, ): - self.access_key_id = access_key_id - self.access_key_secret = access_key_secret + self.accessibility = accessibility + self.data_source_type = data_source_type + self.data_type = data_type + self.dataset_id = dataset_id + self.description = description + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.labels = labels + self.latest_version = latest_version + self.name = name + self.options = options + self.owner_id = owner_id + self.property = property + self.provider = provider + self.provider_type = provider_type self.request_id = request_id - self.security_token = security_token + self.source_dataset_id = source_dataset_id + self.source_dataset_version = source_dataset_version + self.source_id = source_id + self.source_type = source_type + self.tag_template_type = tag_template_type + self.uri = uri + self.user_id = user_id + self.workspace_id = workspace_id def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.latest_version: + self.latest_version.validate() def to_map(self): _map = super().to_map() @@ -1076,44 +8037,127 @@ def to_map(self): return _map result = dict() - if self.access_key_id is not None: - result['AccessKeyId'] = self.access_key_id - if self.access_key_secret is not None: - result['AccessKeySecret'] = self.access_key_secret + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.data_source_type is not None: + result['DataSourceType'] = self.data_source_type + if self.data_type is not None: + result['DataType'] = self.data_type + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.description is not None: + result['Description'] = self.description + if 
self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.latest_version is not None: + result['LatestVersion'] = self.latest_version.to_map() + if self.name is not None: + result['Name'] = self.name + if self.options is not None: + result['Options'] = self.options + if self.owner_id is not None: + result['OwnerId'] = self.owner_id + if self.property is not None: + result['Property'] = self.property + if self.provider is not None: + result['Provider'] = self.provider + if self.provider_type is not None: + result['ProviderType'] = self.provider_type if self.request_id is not None: result['RequestId'] = self.request_id - if self.security_token is not None: - result['SecurityToken'] = self.security_token + if self.source_dataset_id is not None: + result['SourceDatasetId'] = self.source_dataset_id + if self.source_dataset_version is not None: + result['SourceDatasetVersion'] = self.source_dataset_version + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.tag_template_type is not None: + result['TagTemplateType'] = self.tag_template_type + if self.uri is not None: + result['Uri'] = self.uri + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AccessKeyId') is not None: - self.access_key_id = m.get('AccessKeyId') - if m.get('AccessKeySecret') is not None: - self.access_key_secret = m.get('AccessKeySecret') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('DataSourceType') is not None: + 
self.data_source_type = m.get('DataSourceType') + if m.get('DataType') is not None: + self.data_type = m.get('DataType') + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('LatestVersion') is not None: + temp_model = DatasetVersion() + self.latest_version = temp_model.from_map(m['LatestVersion']) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Options') is not None: + self.options = m.get('Options') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') + if m.get('Property') is not None: + self.property = m.get('Property') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('ProviderType') is not None: + self.provider_type = m.get('ProviderType') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('SecurityToken') is not None: - self.security_token = m.get('SecurityToken') + if m.get('SourceDatasetId') is not None: + self.source_dataset_id = m.get('SourceDatasetId') + if m.get('SourceDatasetVersion') is not None: + self.source_dataset_version = m.get('SourceDatasetVersion') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('TagTemplateType') is not None: + self.tag_template_type = m.get('TagTemplateType') + if m.get('Uri') is not None: + self.uri = m.get('Uri') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') 
return self -class AssumeServiceIdentityRoleResponse(TeaModel): +class GetDatasetResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: AssumeServiceIdentityRoleResponseBody = None, + body: GetDatasetResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -1138,32 +8182,16 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = AssumeServiceIdentityRoleResponseBody() + temp_model = GetDatasetResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateCodeSourceRequest(TeaModel): +class GetDatasetsStatisticsRequest(TeaModel): def __init__( self, - accessibility: str = None, - code_branch: str = None, - code_repo: str = None, - code_repo_access_token: str = None, - code_repo_user_name: str = None, - description: str = None, - display_name: str = None, - mount_path: str = None, workspace_id: str = None, ): - self.accessibility = accessibility - self.code_branch = code_branch - self.code_repo = code_repo - self.code_repo_access_token = code_repo_access_token - self.code_repo_user_name = code_repo_user_name - self.description = description - self.display_name = display_name - self.mount_path = mount_path self.workspace_id = workspace_id def validate(self): @@ -1175,56 +8203,24 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.code_branch is not None: - result['CodeBranch'] = self.code_branch - if self.code_repo is not None: - result['CodeRepo'] = self.code_repo - if self.code_repo_access_token is not None: - result['CodeRepoAccessToken'] = 
self.code_repo_access_token - if self.code_repo_user_name is not None: - result['CodeRepoUserName'] = self.code_repo_user_name - if self.description is not None: - result['Description'] = self.description - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.mount_path is not None: - result['MountPath'] = self.mount_path if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('CodeBranch') is not None: - self.code_branch = m.get('CodeBranch') - if m.get('CodeRepo') is not None: - self.code_repo = m.get('CodeRepo') - if m.get('CodeRepoAccessToken') is not None: - self.code_repo_access_token = m.get('CodeRepoAccessToken') - if m.get('CodeRepoUserName') is not None: - self.code_repo_user_name = m.get('CodeRepoUserName') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('MountPath') is not None: - self.mount_path = m.get('MountPath') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class CreateCodeSourceResponseBody(TeaModel): +class GetDatasetsStatisticsResponseBody(TeaModel): def __init__( self, - code_source_id: str = None, + count: int = None, request_id: str = None, ): - self.code_source_id = code_source_id + self.count = count self.request_id = request_id def validate(self): @@ -1236,36 +8232,33 @@ def to_map(self): return _map result = dict() - if self.code_source_id is not None: - result['CodeSourceId'] = self.code_source_id + if self.count is not None: + result['Count'] = self.count if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CodeSourceId') is not None: - self.code_source_id = 
m.get('CodeSourceId') + if m.get('Count') is not None: + self.count = m.get('Count') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class CreateCodeSourceResponse(TeaModel): +class GetDatasetsStatisticsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateCodeSourceResponseBody = None, + body: GetDatasetsStatisticsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -1290,41 +8283,78 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateCodeSourceResponseBody() + temp_model = GetDatasetsStatisticsResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateDatasetRequest(TeaModel): +class GetDatasetVersionResponseBody(TeaModel): def __init__( self, - accessibility: str = None, + data_count: int = None, + data_size: int = None, data_source_type: str = None, - data_type: str = None, + dataset_id: str = None, description: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, labels: List[Label] = None, - name: str = None, options: str = None, property: str = None, - provider_type: str = None, + request_id: str = None, source_id: str = None, source_type: str = None, uri: str = None, - workspace_id: str = None, + version_name: str = None, ): - self.accessibility = accessibility + # 数据集的数据量 + self.data_count = data_count + # 数据集版本的数据大小。 + self.data_size = data_size + # 数据源类型。支持以下取值: + # - OSS:阿里云对象存储(OSS)。 + # - NAS:阿里云文件存储(NAS)。 + # + # This parameter is required. 
self.data_source_type = data_source_type - self.data_type = data_type + # 代表资源一级ID的资源属性字段 + self.dataset_id = dataset_id + # 数据集版本的描述信息。 self.description = description + self.gmt_create_time = gmt_create_time + # 创建时间。 + self.gmt_modified_time = gmt_modified_time + # 代表资源标签的资源属性字段 self.labels = labels - self.name = name + # 扩展字段,JsonString类型。 + # 当DLC使用数据集时,可通过配置mountPath字段指定数据集默认挂载路径。 self.options = options + # 数据集的属性。支持以下取值: + # - FILE:文件。 + # - DIRECTORY:文件夹。 + # + # This parameter is required. self.property = property - self.provider_type = provider_type + self.request_id = request_id + # 数据来源ID。 self.source_id = source_id + # 数据来源类型,默认为USER。支持以下取值: + # - PAI-PUBLIC-DATASET:PAI公共数据集。 + # - ITAG:iTAG模块标注结果生成的数据集。 + # - USER:用户注册的数据集。 self.source_type = source_type + # Uri配置样例如下: + # - 数据源类型为OSS:`oss://bucket.endpoint/object` + # - 数据源类型为NAS: + # 通用型NAS格式为:`nas://.region/subpath/to/dir/`; + # CPFS1.0:`nas://.region/subpath/to/dir/`; + # CPFS2.0:`nas://.region//`。 + # CPFS1.0和CPFS2.0根据fsid的格式来区分:CPFS1.0 格式为cpfs-<8位ascii字符>;CPFS2.0 格式为cpfs-<16为ascii字符>。 + # + # This parameter is required. 
self.uri = uri - self.workspace_id = workspace_id + # 代表资源名称的资源属性字段 + self.version_name = version_name def validate(self): if self.labels: @@ -1338,118 +8368,90 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility + if self.data_count is not None: + result['DataCount'] = self.data_count + if self.data_size is not None: + result['DataSize'] = self.data_size if self.data_source_type is not None: result['DataSourceType'] = self.data_source_type - if self.data_type is not None: - result['DataType'] = self.data_type + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id if self.description is not None: result['Description'] = self.description + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time result['Labels'] = [] if self.labels is not None: for k in self.labels: result['Labels'].append(k.to_map() if k else None) - if self.name is not None: - result['Name'] = self.name if self.options is not None: result['Options'] = self.options if self.property is not None: result['Property'] = self.property - if self.provider_type is not None: - result['ProviderType'] = self.provider_type + if self.request_id is not None: + result['RequestId'] = self.request_id if self.source_id is not None: result['SourceId'] = self.source_id if self.source_type is not None: result['SourceType'] = self.source_type if self.uri is not None: result['Uri'] = self.uri - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.version_name is not None: + result['VersionName'] = self.version_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') + if m.get('DataCount') is not None: + self.data_count = m.get('DataCount') + if m.get('DataSize') is not 
None: + self.data_size = m.get('DataSize') if m.get('DataSourceType') is not None: self.data_source_type = m.get('DataSourceType') - if m.get('DataType') is not None: - self.data_type = m.get('DataType') + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') if m.get('Description') is not None: self.description = m.get('Description') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') self.labels = [] if m.get('Labels') is not None: for k in m.get('Labels'): temp_model = Label() self.labels.append(temp_model.from_map(k)) - if m.get('Name') is not None: - self.name = m.get('Name') if m.get('Options') is not None: self.options = m.get('Options') if m.get('Property') is not None: self.property = m.get('Property') - if m.get('ProviderType') is not None: - self.provider_type = m.get('ProviderType') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') if m.get('SourceId') is not None: self.source_id = m.get('SourceId') if m.get('SourceType') is not None: self.source_type = m.get('SourceType') if m.get('Uri') is not None: self.uri = m.get('Uri') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - return self - - -class CreateDatasetResponseBody(TeaModel): - def __init__( - self, - dataset_id: str = None, - request_id: str = None, - ): - self.dataset_id = dataset_id - self.request_id = request_id - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.dataset_id is not None: - result['DatasetId'] = self.dataset_id - if self.request_id is not None: - result['RequestId'] = self.request_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('DatasetId') is not None: - self.dataset_id = m.get('DatasetId') - if m.get('RequestId') is not None: - 
self.request_id = m.get('RequestId') + if m.get('VersionName') is not None: + self.version_name = m.get('VersionName') return self -class CreateDatasetResponse(TeaModel): +class GetDatasetVersionResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateDatasetResponseBody = None, + body: GetDatasetVersionResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -1474,52 +8476,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateDatasetResponseBody() + temp_model = GetDatasetVersionResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateDatasetLabelsRequest(TeaModel): - def __init__( - self, - labels: List[Label] = None, - ): - self.labels = labels - - def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - return result - - def from_map(self, m: dict = None): - m = m or dict() - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - return self - - -class CreateDatasetLabelsResponseBody(TeaModel): +class GetDefaultWorkspaceRequest(TeaModel): def __init__( self, - request_id: str = None, + verbose: bool = None, ): - self.request_id = request_id + self.verbose = verbose def validate(self): pass @@ -1530,69 +8497,27 @@ def to_map(self): return _map result = dict() - if 
self.request_id is not None: - result['RequestId'] = self.request_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - return self - - -class CreateDatasetLabelsResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: CreateDatasetLabelsResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.verbose is not None: + result['Verbose'] = self.verbose return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = CreateDatasetLabelsResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') return self -class CreateDefaultWorkspaceRequestResources(TeaModel): +class GetDefaultWorkspaceResponseBodyConditions(TeaModel): def __init__( self, - product_type: str = None, - resource_type: str = None, + code: int = None, + message: str = None, + type: str = None, ): - self.product_type = product_type - self.resource_type = resource_type + self.code = code + self.message = message + self.type = type def validate(self): pass @@ -1603,39 +8528,38 @@ def to_map(self): return _map result = 
dict() - if self.product_type is not None: - result['ProductType'] = self.product_type - if self.resource_type is not None: - result['ResourceType'] = self.resource_type + if self.code is not None: + result['Code'] = self.code + if self.message is not None: + result['Message'] = self.message + if self.type is not None: + result['Type'] = self.type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ProductType') is not None: - self.product_type = m.get('ProductType') - if m.get('ResourceType') is not None: - self.resource_type = m.get('ResourceType') + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('Type') is not None: + self.type = m.get('Type') return self -class CreateDefaultWorkspaceRequest(TeaModel): +class GetDefaultWorkspaceResponseBodyOwner(TeaModel): def __init__( self, - add_all_ram_users: bool = None, - description: str = None, - env_types: List[str] = None, - resources: List[CreateDefaultWorkspaceRequestResources] = None, + user_id: str = None, + user_kp: str = None, + user_name: str = None, ): - self.add_all_ram_users = add_all_ram_users - self.description = description - self.env_types = env_types - self.resources = resources + self.user_id = user_id + self.user_kp = user_kp + self.user_name = user_name def validate(self): - if self.resources: - for k in self.resources: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -1643,45 +8567,61 @@ def to_map(self): return _map result = dict() - if self.add_all_ram_users is not None: - result['AddAllRamUsers'] = self.add_all_ram_users - if self.description is not None: - result['Description'] = self.description - if self.env_types is not None: - result['EnvTypes'] = self.env_types - result['Resources'] = [] - if self.resources is not None: - for k in self.resources: - result['Resources'].append(k.to_map() if k else None) + if self.user_id is not None: + result['UserId'] 
= self.user_id + if self.user_kp is not None: + result['UserKp'] = self.user_kp + if self.user_name is not None: + result['UserName'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AddAllRamUsers') is not None: - self.add_all_ram_users = m.get('AddAllRamUsers') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('EnvTypes') is not None: - self.env_types = m.get('EnvTypes') - self.resources = [] - if m.get('Resources') is not None: - for k in m.get('Resources'): - temp_model = CreateDefaultWorkspaceRequestResources() - self.resources.append(temp_model.from_map(k)) + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserKp') is not None: + self.user_kp = m.get('UserKp') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') return self -class CreateDefaultWorkspaceResponseBody(TeaModel): +class GetDefaultWorkspaceResponseBody(TeaModel): def __init__( self, + conditions: List[GetDefaultWorkspaceResponseBodyConditions] = None, + creator: str = None, + description: str = None, + display_name: str = None, + env_types: List[str] = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + owner: GetDefaultWorkspaceResponseBodyOwner = None, request_id: str = None, + status: str = None, workspace_id: str = None, + workspace_name: str = None, ): + self.conditions = conditions + self.creator = creator + self.description = description + self.display_name = display_name + self.env_types = env_types + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.owner = owner self.request_id = request_id + self.status = status self.workspace_id = workspace_id + self.workspace_name = workspace_name def validate(self): - pass + if self.conditions: + for k in self.conditions: + if k: + k.validate() + if self.owner: + self.owner.validate() def to_map(self): _map = super().to_map() @@ -1689,36 +8629,79 @@ def 
to_map(self): return _map result = dict() + result['Conditions'] = [] + if self.conditions is not None: + for k in self.conditions: + result['Conditions'].append(k.to_map() if k else None) + if self.creator is not None: + result['Creator'] = self.creator + if self.description is not None: + result['Description'] = self.description + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.env_types is not None: + result['EnvTypes'] = self.env_types + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.owner is not None: + result['Owner'] = self.owner.to_map() if self.request_id is not None: result['RequestId'] = self.request_id + if self.status is not None: + result['Status'] = self.status if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id + if self.workspace_name is not None: + result['WorkspaceName'] = self.workspace_name return result def from_map(self, m: dict = None): m = m or dict() + self.conditions = [] + if m.get('Conditions') is not None: + for k in m.get('Conditions'): + temp_model = GetDefaultWorkspaceResponseBodyConditions() + self.conditions.append(temp_model.from_map(k)) + if m.get('Creator') is not None: + self.creator = m.get('Creator') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('EnvTypes') is not None: + self.env_types = m.get('EnvTypes') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('Owner') is not None: + temp_model = GetDefaultWorkspaceResponseBodyOwner() + self.owner = temp_model.from_map(m['Owner']) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if 
m.get('Status') is not None: + self.status = m.get('Status') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') + if m.get('WorkspaceName') is not None: + self.workspace_name = m.get('WorkspaceName') return self -class CreateDefaultWorkspaceResponse(TeaModel): +class GetDefaultWorkspaceResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateDefaultWorkspaceResponseBody = None, + body: GetDefaultWorkspaceResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -1743,59 +8726,55 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateDefaultWorkspaceResponseBody() + temp_model = GetDefaultWorkspaceResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateDingTalkRobotMessageRequest(TeaModel): - def __init__( - self, - access_token: str = None, - message: str = None, - secret: str = None, - ): - self.access_token = access_token - self.message = message - self.secret = secret - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.access_token is not None: - result['AccessToken'] = self.access_token - if self.message is not None: - result['Message'] = self.message - if self.secret is not None: - result['Secret'] = self.secret - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('AccessToken') is not None: - self.access_token = m.get('AccessToken') - if m.get('Message') is not None: - self.message = m.get('Message') - if m.get('Secret') is not None: - self.secret = m.get('Secret') - return self 
- - -class CreateDingTalkRobotMessageResponseBody(TeaModel): +class GetExperimentResponseBody(TeaModel): def __init__( self, + accessibility: str = None, + artifact_uri: str = None, + experiment_id: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + labels: List[ExperimentLabel] = None, + name: str = None, + owner_id: str = None, request_id: str = None, + tensorboard_log_uri: str = None, + user_id: str = None, + workspace_id: str = None, ): + self.accessibility = accessibility + # Artifact的OSS存储路径 + self.artifact_uri = artifact_uri + # 实验UUID + self.experiment_id = experiment_id + # 创建时间 + self.gmt_create_time = gmt_create_time + # 更新时间 + self.gmt_modified_time = gmt_modified_time + # 标签 + self.labels = labels + # 名称 + self.name = name + # 拥有者ID + self.owner_id = owner_id self.request_id = request_id + # tensorboard日志OSS存储路径 + self.tensorboard_log_uri = tensorboard_log_uri + # 创建者ID + self.user_id = user_id + # 工作空间ID + self.workspace_id = workspace_id def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -1803,32 +8782,78 @@ def to_map(self): return _map result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.artifact_uri is not None: + result['ArtifactUri'] = self.artifact_uri + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.owner_id is not None: + result['OwnerId'] = self.owner_id if self.request_id is not None: result['RequestId'] = self.request_id + if self.tensorboard_log_uri is not None: + 
result['TensorboardLogUri'] = self.tensorboard_log_uri + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('ArtifactUri') is not None: + self.artifact_uri = m.get('ArtifactUri') + if m.get('ExperimentId') is not None: + self.experiment_id = m.get('ExperimentId') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = ExperimentLabel() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TensorboardLogUri') is not None: + self.tensorboard_log_uri = m.get('TensorboardLogUri') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class CreateDingTalkRobotMessageResponse(TeaModel): +class GetExperimentResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateDingTalkRobotMessageResponseBody = None, + body: GetExperimentResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -1853,19 +8878,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = 
m.get('statusCode') if m.get('body') is not None: - temp_model = CreateDingTalkRobotMessageResponseBody() + temp_model = GetExperimentResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateMemberRequestMembers(TeaModel): +class GetImageRequest(TeaModel): def __init__( self, - roles: List[str] = None, - user_id: str = None, + verbose: bool = None, ): - self.roles = roles - self.user_id = user_id + self.verbose = verbose def validate(self): pass @@ -1876,68 +8899,25 @@ def to_map(self): return _map result = dict() - if self.roles is not None: - result['Roles'] = self.roles - if self.user_id is not None: - result['UserId'] = self.user_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Roles') is not None: - self.roles = m.get('Roles') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - return self - - -class CreateMemberRequest(TeaModel): - def __init__( - self, - members: List[CreateMemberRequestMembers] = None, - ): - self.members = members - - def validate(self): - if self.members: - for k in self.members: - if k: - k.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - result['Members'] = [] - if self.members is not None: - for k in self.members: - result['Members'].append(k.to_map() if k else None) + if self.verbose is not None: + result['Verbose'] = self.verbose return result def from_map(self, m: dict = None): m = m or dict() - self.members = [] - if m.get('Members') is not None: - for k in m.get('Members'): - temp_model = CreateMemberRequestMembers() - self.members.append(temp_model.from_map(k)) + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') return self -class CreateMemberResponseBodyMembers(TeaModel): +class GetImageResponseBodyLabels(TeaModel): def __init__( self, - display_name: str = None, - member_id: str = None, - roles: List[str] = None, - user_id: str = None, + key: str = None, + 
value: str = None, ): - self.display_name = display_name - self.member_id = member_id - self.roles = roles - self.user_id = user_id + self.key = key + self.value = value def validate(self): pass @@ -1948,41 +8928,53 @@ def to_map(self): return _map result = dict() - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.member_id is not None: - result['MemberId'] = self.member_id - if self.roles is not None: - result['Roles'] = self.roles - if self.user_id is not None: - result['UserId'] = self.user_id + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('MemberId') is not None: - self.member_id = m.get('MemberId') - if m.get('Roles') is not None: - self.roles = m.get('Roles') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class CreateMemberResponseBody(TeaModel): +class GetImageResponseBody(TeaModel): def __init__( self, - members: List[CreateMemberResponseBodyMembers] = None, + accessibility: str = None, + description: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + image_uri: str = None, + labels: List[GetImageResponseBodyLabels] = None, + name: str = None, + parent_user_id: str = None, request_id: str = None, + size: int = None, + user_id: str = None, + workspace_id: str = None, ): - self.members = members + self.accessibility = accessibility + self.description = description + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.image_uri = image_uri + self.labels = labels + self.name = name + self.parent_user_id = parent_user_id self.request_id = request_id + self.size = size + self.user_id = 
user_id + self.workspace_id = workspace_id def validate(self): - if self.members: - for k in self.members: + if self.labels: + for k in self.labels: if k: k.validate() @@ -1992,41 +8984,78 @@ def to_map(self): return _map result = dict() - result['Members'] = [] - if self.members is not None: - for k in self.members: - result['Members'].append(k.to_map() if k else None) + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.description is not None: + result['Description'] = self.description + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.image_uri is not None: + result['ImageUri'] = self.image_uri + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.parent_user_id is not None: + result['ParentUserId'] = self.parent_user_id if self.request_id is not None: result['RequestId'] = self.request_id + if self.size is not None: + result['Size'] = self.size + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - self.members = [] - if m.get('Members') is not None: - for k in m.get('Members'): - temp_model = CreateMemberResponseBodyMembers() - self.members.append(temp_model.from_map(k)) + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('ImageUri') is not None: + self.image_uri = 
m.get('ImageUri') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = GetImageResponseBodyLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('ParentUserId') is not None: + self.parent_user_id = m.get('ParentUserId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('Size') is not None: + self.size = m.get('Size') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class CreateMemberResponse(TeaModel): +class GetImageResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateMemberResponseBody = None, + body: GetImageResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -2051,39 +9080,20 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateMemberResponseBody() + temp_model = GetImageResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateModelRequest(TeaModel): +class GetImagesStatisticsRequest(TeaModel): def __init__( self, - accessibility: str = None, - domain: str = None, - labels: List[Label] = None, - model_description: str = None, - model_doc: str = None, - model_name: str = None, - origin: str = None, - task: str = None, workspace_id: str = None, ): - self.accessibility = accessibility - self.domain = domain - self.labels = labels - self.model_description = model_description - self.model_doc = model_doc - self.model_name = model_name - self.origin = origin - self.task = 
task self.workspace_id = workspace_id def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -2091,61 +9101,24 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.domain is not None: - result['Domain'] = self.domain - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.model_description is not None: - result['ModelDescription'] = self.model_description - if self.model_doc is not None: - result['ModelDoc'] = self.model_doc - if self.model_name is not None: - result['ModelName'] = self.model_name - if self.origin is not None: - result['Origin'] = self.origin - if self.task is not None: - result['Task'] = self.task if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('Domain') is not None: - self.domain = m.get('Domain') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('ModelDescription') is not None: - self.model_description = m.get('ModelDescription') - if m.get('ModelDoc') is not None: - self.model_doc = m.get('ModelDoc') - if m.get('ModelName') is not None: - self.model_name = m.get('ModelName') - if m.get('Origin') is not None: - self.origin = m.get('Origin') - if m.get('Task') is not None: - self.task = m.get('Task') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class CreateModelResponseBody(TeaModel): +class GetImagesStatisticsResponseBody(TeaModel): def __init__( self, - model_id: str = None, + count: int = None, request_id: str = None, ): - self.model_id = model_id + 
self.count = count self.request_id = request_id def validate(self): @@ -2157,77 +9130,35 @@ def to_map(self): return _map result = dict() - if self.model_id is not None: - result['ModelId'] = self.model_id + if self.count is not None: + result['Count'] = self.count if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ModelId') is not None: - self.model_id = m.get('ModelId') + if m.get('Count') is not None: + self.count = m.get('Count') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class CreateModelResponse(TeaModel): +class GetImagesStatisticsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateModelResponseBody = None, + body: GetImagesStatisticsResponseBody = None, ): self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = CreateModelResponseBody() - self.body = temp_model.from_map(m['body']) - return self - - -class CreateModelLabelsRequest(TeaModel): - def __init__( - self, - labels: List[Label] = None, - ): - self.labels = labels + self.status_code = status_code + 
self.body = body def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -2235,28 +9166,44 @@ def to_map(self): return _map result = dict() - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetImagesStatisticsResponseBody() + self.body = temp_model.from_map(m['body']) return self -class CreateModelLabelsResponseBody(TeaModel): +class GetInstanceJobResponseBody(TeaModel): def __init__( self, + gmt_create_time: str = None, + instance_id: str = None, + instance_job_id: str = None, + reason_message: str = None, request_id: str = None, + status: str = None, + type: str = None, ): + self.gmt_create_time = gmt_create_time + self.instance_id = instance_id + self.instance_job_id = instance_job_id + self.reason_message = reason_message self.request_id = request_id + self.status = status + self.type = type def validate(self): pass @@ -2267,32 +9214,53 @@ def to_map(self): return _map result = dict() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.instance_job_id is not None: + result['InstanceJobId'] = self.instance_job_id + if self.reason_message is not 
None: + result['ReasonMessage'] = self.reason_message if self.request_id is not None: result['RequestId'] = self.request_id + if self.status is not None: + result['Status'] = self.status + if self.type is not None: + result['Type'] = self.type return result def from_map(self, m: dict = None): m = m or dict() + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('InstanceJobId') is not None: + self.instance_job_id = m.get('InstanceJobId') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('Type') is not None: + self.type = m.get('Type') return self -class CreateModelLabelsResponse(TeaModel): +class GetInstanceJobResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateModelLabelsResponseBody = None, + body: GetInstanceJobResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -2317,19 +9285,22 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateModelLabelsResponseBody() + temp_model = GetInstanceJobResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateModelReleaseRequest(TeaModel): +class GetInstanceStatisticsRequest(TeaModel): def __init__( self, - target_model_origin: str = None, - target_model_provider: str = None, + option: str = None, + status: str = None, + workspace_id: str = None, ): - self.target_model_origin = 
target_model_origin - self.target_model_provider = target_model_provider + self.option = option + self.status = status + # This parameter is required. + self.workspace_id = workspace_id def validate(self): pass @@ -2340,32 +9311,72 @@ def to_map(self): return _map result = dict() - if self.target_model_origin is not None: - result['TargetModelOrigin'] = self.target_model_origin - if self.target_model_provider is not None: - result['TargetModelProvider'] = self.target_model_provider + if self.option is not None: + result['Option'] = self.option + if self.status is not None: + result['Status'] = self.status + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('TargetModelOrigin') is not None: - self.target_model_origin = m.get('TargetModelOrigin') - if m.get('TargetModelProvider') is not None: - self.target_model_provider = m.get('TargetModelProvider') + if m.get('Option') is not None: + self.option = m.get('Option') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class CreateModelReleaseResponseBody(TeaModel): +class GetInstanceStatisticsResponseBodyInstances(TeaModel): def __init__( self, - model_id: str = None, + count: int = None, + instance_type: str = None, + ): + self.count = count + self.instance_type = instance_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.count is not None: + result['Count'] = self.count + if self.instance_type is not None: + result['InstanceType'] = self.instance_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Count') is not None: + self.count = m.get('Count') + if m.get('InstanceType') is not None: + self.instance_type = m.get('InstanceType') + return self + + +class 
GetInstanceStatisticsResponseBody(TeaModel): + def __init__( + self, + instances: List[GetInstanceStatisticsResponseBodyInstances] = None, request_id: str = None, ): - self.model_id = model_id + self.instances = instances self.request_id = request_id def validate(self): - pass + if self.instances: + for k in self.instances: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -2373,36 +9384,38 @@ def to_map(self): return _map result = dict() - if self.model_id is not None: - result['ModelId'] = self.model_id + result['Instances'] = [] + if self.instances is not None: + for k in self.instances: + result['Instances'].append(k.to_map() if k else None) if self.request_id is not None: - result['RequestId'] = self.request_id + result['requestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ModelId') is not None: - self.model_id = m.get('ModelId') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + self.instances = [] + if m.get('Instances') is not None: + for k in m.get('Instances'): + temp_model = GetInstanceStatisticsResponseBodyInstances() + self.instances.append(temp_model.from_map(k)) + if m.get('requestId') is not None: + self.request_id = m.get('requestId') return self -class CreateModelReleaseResponse(TeaModel): +class GetInstanceStatisticsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateModelReleaseResponseBody = None, + body: GetInstanceStatisticsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -2427,47 +9440,22 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - 
temp_model = CreateModelReleaseResponseBody() + temp_model = GetInstanceStatisticsResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateModelVersionRequest(TeaModel): +class GetMemberRequest(TeaModel): def __init__( self, - approval_status: str = None, - format_type: str = None, - framework_type: str = None, - inference_spec: Dict[str, Any] = None, - labels: List[Label] = None, - metrics: Dict[str, Any] = None, - options: str = None, - source_id: str = None, - source_type: str = None, - training_spec: Dict[str, Any] = None, - uri: str = None, - version_description: str = None, - version_name: str = None, + member_id: str = None, + user_id: str = None, ): - self.approval_status = approval_status - self.format_type = format_type - self.framework_type = framework_type - self.inference_spec = inference_spec - self.labels = labels - self.metrics = metrics - self.options = options - self.source_id = source_id - self.source_type = source_type - self.training_spec = training_spec - self.uri = uri - self.version_description = version_description - self.version_name = version_name + self.member_id = member_id + self.user_id = user_id def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -2475,78 +9463,39 @@ def to_map(self): return _map result = dict() - if self.approval_status is not None: - result['ApprovalStatus'] = self.approval_status - if self.format_type is not None: - result['FormatType'] = self.format_type - if self.framework_type is not None: - result['FrameworkType'] = self.framework_type - if self.inference_spec is not None: - result['InferenceSpec'] = self.inference_spec - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.metrics is not None: - result['Metrics'] = self.metrics - if self.options is not None: - result['Options'] = self.options - if self.source_id is 
not None: - result['SourceId'] = self.source_id - if self.source_type is not None: - result['SourceType'] = self.source_type - if self.training_spec is not None: - result['TrainingSpec'] = self.training_spec - if self.uri is not None: - result['Uri'] = self.uri - if self.version_description is not None: - result['VersionDescription'] = self.version_description - if self.version_name is not None: - result['VersionName'] = self.version_name + if self.member_id is not None: + result['MemberId'] = self.member_id + if self.user_id is not None: + result['UserId'] = self.user_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ApprovalStatus') is not None: - self.approval_status = m.get('ApprovalStatus') - if m.get('FormatType') is not None: - self.format_type = m.get('FormatType') - if m.get('FrameworkType') is not None: - self.framework_type = m.get('FrameworkType') - if m.get('InferenceSpec') is not None: - self.inference_spec = m.get('InferenceSpec') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('Metrics') is not None: - self.metrics = m.get('Metrics') - if m.get('Options') is not None: - self.options = m.get('Options') - if m.get('SourceId') is not None: - self.source_id = m.get('SourceId') - if m.get('SourceType') is not None: - self.source_type = m.get('SourceType') - if m.get('TrainingSpec') is not None: - self.training_spec = m.get('TrainingSpec') - if m.get('Uri') is not None: - self.uri = m.get('Uri') - if m.get('VersionDescription') is not None: - self.version_description = m.get('VersionDescription') - if m.get('VersionName') is not None: - self.version_name = m.get('VersionName') + if m.get('MemberId') is not None: + self.member_id = m.get('MemberId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') return self -class CreateModelVersionResponseBody(TeaModel): +class GetMemberResponseBody(TeaModel): 
def __init__( self, + display_name: str = None, + gmt_create_time: str = None, + member_id: str = None, + member_name: str = None, request_id: str = None, - version_name: str = None, + roles: List[str] = None, + user_id: str = None, ): + self.display_name = display_name + self.gmt_create_time = gmt_create_time + self.member_id = member_id + self.member_name = member_name self.request_id = request_id - self.version_name = version_name + self.roles = roles + self.user_id = user_id def validate(self): pass @@ -2557,36 +9506,53 @@ def to_map(self): return _map result = dict() + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.member_id is not None: + result['MemberId'] = self.member_id + if self.member_name is not None: + result['MemberName'] = self.member_name if self.request_id is not None: result['RequestId'] = self.request_id - if self.version_name is not None: - result['VersionName'] = self.version_name + if self.roles is not None: + result['Roles'] = self.roles + if self.user_id is not None: + result['UserId'] = self.user_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('MemberId') is not None: + self.member_id = m.get('MemberId') + if m.get('MemberName') is not None: + self.member_name = m.get('MemberName') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('VersionName') is not None: - self.version_name = m.get('VersionName') + if m.get('Roles') is not None: + self.roles = m.get('Roles') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') return self -class CreateModelVersionResponse(TeaModel): +class GetMemberResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, 
status_code: int = None, - body: CreateModelVersionResponseBody = None, + body: GetMemberResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -2611,23 +9577,63 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateModelVersionResponseBody() + temp_model = GetMemberResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateModelVersionLabelsRequest(TeaModel): +class GetModelResponseBody(TeaModel): def __init__( self, + accessibility: str = None, + domain: str = None, + extra_info: Dict[str, Any] = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, labels: List[Label] = None, + latest_version: ModelVersion = None, + model_description: str = None, + model_doc: str = None, + model_id: str = None, + model_name: str = None, + model_type: str = None, + order_number: int = None, + origin: str = None, + owner_id: str = None, + provider: str = None, + request_id: str = None, + task: str = None, + user_id: str = None, + workspace_id: str = None, ): + self.accessibility = accessibility + self.domain = domain + self.extra_info = extra_info + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time self.labels = labels + self.latest_version = latest_version + self.model_description = model_description + self.model_doc = model_doc + self.model_id = model_id + self.model_name = model_name + self.model_type = model_type + self.order_number = order_number + self.origin = origin + self.owner_id = owner_id + self.provider = provider + self.request_id = request_id + self.task = task + self.user_id = user_id + self.workspace_id = workspace_id def validate(self): 
if self.labels: for k in self.labels: if k: k.validate() + if self.latest_version: + self.latest_version.validate() def to_map(self): _map = super().to_map() @@ -2635,64 +9641,111 @@ def to_map(self): return _map result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.domain is not None: + result['Domain'] = self.domain + if self.extra_info is not None: + result['ExtraInfo'] = self.extra_info + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time result['Labels'] = [] if self.labels is not None: for k in self.labels: result['Labels'].append(k.to_map() if k else None) + if self.latest_version is not None: + result['LatestVersion'] = self.latest_version.to_map() + if self.model_description is not None: + result['ModelDescription'] = self.model_description + if self.model_doc is not None: + result['ModelDoc'] = self.model_doc + if self.model_id is not None: + result['ModelId'] = self.model_id + if self.model_name is not None: + result['ModelName'] = self.model_name + if self.model_type is not None: + result['ModelType'] = self.model_type + if self.order_number is not None: + result['OrderNumber'] = self.order_number + if self.origin is not None: + result['Origin'] = self.origin + if self.owner_id is not None: + result['OwnerId'] = self.owner_id + if self.provider is not None: + result['Provider'] = self.provider + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.task is not None: + result['Task'] = self.task + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('Domain') is not None: + self.domain = 
m.get('Domain') + if m.get('ExtraInfo') is not None: + self.extra_info = m.get('ExtraInfo') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') self.labels = [] if m.get('Labels') is not None: for k in m.get('Labels'): temp_model = Label() self.labels.append(temp_model.from_map(k)) - return self - - -class CreateModelVersionLabelsResponseBody(TeaModel): - def __init__( - self, - request_id: str = None, - ): - self.request_id = request_id - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id - return result - - def from_map(self, m: dict = None): - m = m or dict() + if m.get('LatestVersion') is not None: + temp_model = ModelVersion() + self.latest_version = temp_model.from_map(m['LatestVersion']) + if m.get('ModelDescription') is not None: + self.model_description = m.get('ModelDescription') + if m.get('ModelDoc') is not None: + self.model_doc = m.get('ModelDoc') + if m.get('ModelId') is not None: + self.model_id = m.get('ModelId') + if m.get('ModelName') is not None: + self.model_name = m.get('ModelName') + if m.get('ModelType') is not None: + self.model_type = m.get('ModelType') + if m.get('OrderNumber') is not None: + self.order_number = m.get('OrderNumber') + if m.get('Origin') is not None: + self.origin = m.get('Origin') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') + if m.get('Provider') is not None: + self.provider = m.get('Provider') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('Task') is not None: + self.task = m.get('Task') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class 
CreateModelVersionLabelsResponse(TeaModel): +class GetModelResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateModelVersionLabelsResponseBody = None, + body: GetModelResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -2717,57 +9770,63 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateModelVersionLabelsResponseBody() + temp_model = GetModelResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateModelVersionReleaseRequest(TeaModel): - def __init__( - self, - target_model_origin: str = None, - target_model_provider: str = None, - ): - self.target_model_origin = target_model_origin - self.target_model_provider = target_model_provider - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.target_model_origin is not None: - result['TargetModelOrigin'] = self.target_model_origin - if self.target_model_provider is not None: - result['TargetModelProvider'] = self.target_model_provider - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('TargetModelOrigin') is not None: - self.target_model_origin = m.get('TargetModelOrigin') - if m.get('TargetModelProvider') is not None: - self.target_model_provider = m.get('TargetModelProvider') - return self - - -class CreateModelVersionReleaseResponseBody(TeaModel): +class GetModelVersionResponseBody(TeaModel): def __init__( self, - model_id: str = None, + approval_status: str = None, + compression_spec: Dict[str, Any] = None, + evaluation_spec: Dict[str, Any] = None, + 
extra_info: Dict[str, Any] = None, + format_type: str = None, + framework_type: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + inference_spec: Dict[str, Any] = None, + labels: List[Label] = None, + metrics: Dict[str, Any] = None, + options: str = None, + owner_id: str = None, request_id: str = None, + source_id: str = None, + source_type: str = None, + training_spec: Dict[str, Any] = None, + uri: str = None, + user_id: str = None, + version_description: str = None, version_name: str = None, ): - self.model_id = model_id + self.approval_status = approval_status + self.compression_spec = compression_spec + self.evaluation_spec = evaluation_spec + self.extra_info = extra_info + self.format_type = format_type + self.framework_type = framework_type + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.inference_spec = inference_spec + self.labels = labels + self.metrics = metrics + self.options = options + self.owner_id = owner_id self.request_id = request_id + self.source_id = source_id + self.source_type = source_type + self.training_spec = training_spec + self.uri = uri + self.user_id = user_id + self.version_description = version_description self.version_name = version_name def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -2775,40 +9834,114 @@ def to_map(self): return _map result = dict() - if self.model_id is not None: - result['ModelId'] = self.model_id + if self.approval_status is not None: + result['ApprovalStatus'] = self.approval_status + if self.compression_spec is not None: + result['CompressionSpec'] = self.compression_spec + if self.evaluation_spec is not None: + result['EvaluationSpec'] = self.evaluation_spec + if self.extra_info is not None: + result['ExtraInfo'] = self.extra_info + if self.format_type is not None: + result['FormatType'] = self.format_type + if self.framework_type is not None: + 
result['FrameworkType'] = self.framework_type + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.metrics is not None: + result['Metrics'] = self.metrics + if self.options is not None: + result['Options'] = self.options + if self.owner_id is not None: + result['OwnerId'] = self.owner_id if self.request_id is not None: result['RequestId'] = self.request_id + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.training_spec is not None: + result['TrainingSpec'] = self.training_spec + if self.uri is not None: + result['Uri'] = self.uri + if self.user_id is not None: + result['UserId'] = self.user_id + if self.version_description is not None: + result['VersionDescription'] = self.version_description if self.version_name is not None: result['VersionName'] = self.version_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ModelId') is not None: - self.model_id = m.get('ModelId') + if m.get('ApprovalStatus') is not None: + self.approval_status = m.get('ApprovalStatus') + if m.get('CompressionSpec') is not None: + self.compression_spec = m.get('CompressionSpec') + if m.get('EvaluationSpec') is not None: + self.evaluation_spec = m.get('EvaluationSpec') + if m.get('ExtraInfo') is not None: + self.extra_info = m.get('ExtraInfo') + if m.get('FormatType') is not None: + self.format_type = m.get('FormatType') + if m.get('FrameworkType') is not None: + self.framework_type = m.get('FrameworkType') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if 
m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('InferenceSpec') is not None: + self.inference_spec = m.get('InferenceSpec') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Metrics') is not None: + self.metrics = m.get('Metrics') + if m.get('Options') is not None: + self.options = m.get('Options') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('TrainingSpec') is not None: + self.training_spec = m.get('TrainingSpec') + if m.get('Uri') is not None: + self.uri = m.get('Uri') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('VersionDescription') is not None: + self.version_description = m.get('VersionDescription') if m.get('VersionName') is not None: self.version_name = m.get('VersionName') return self -class CreateModelVersionReleaseResponse(TeaModel): +class GetModelVersionResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateModelVersionReleaseResponseBody = None, + body: GetModelVersionResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -2833,21 +9966,21 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateModelVersionReleaseResponseBody() + temp_model = GetModelVersionResponseBody() self.body = 
temp_model.from_map(m['body']) return self -class CreateProductOrdersRequestProductsInstanceProperties(TeaModel): +class GetPayAsYouGoPriceRequestModuleList(TeaModel): def __init__( self, - code: str = None, - name: str = None, - value: str = None, + config: str = None, + module_code: str = None, + price_type: str = None, ): - self.code = code - self.name = name - self.value = value + self.config = config + self.module_code = module_code + self.price_type = price_type def validate(self): pass @@ -2858,49 +9991,41 @@ def to_map(self): return _map result = dict() - if self.code is not None: - result['Code'] = self.code - if self.name is not None: - result['Name'] = self.name - if self.value is not None: - result['Value'] = self.value + if self.config is not None: + result['Config'] = self.config + if self.module_code is not None: + result['ModuleCode'] = self.module_code + if self.price_type is not None: + result['PriceType'] = self.price_type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Code') is not None: - self.code = m.get('Code') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('Config') is not None: + self.config = m.get('Config') + if m.get('ModuleCode') is not None: + self.module_code = m.get('ModuleCode') + if m.get('PriceType') is not None: + self.price_type = m.get('PriceType') return self -class CreateProductOrdersRequestProducts(TeaModel): +class GetPayAsYouGoPriceRequest(TeaModel): def __init__( self, - auto_renew: bool = None, - charge_type: str = None, - duration: int = None, - instance_properties: List[CreateProductOrdersRequestProductsInstanceProperties] = None, - order_type: str = None, - pricing_cycle: str = None, + module_list: GetPayAsYouGoPriceRequestModuleList = None, product_code: str = None, + product_type: str = None, + subscription_type: str = None, ): - self.auto_renew = auto_renew - self.charge_type = charge_type - 
self.duration = duration - self.instance_properties = instance_properties - self.order_type = order_type - self.pricing_cycle = pricing_cycle + self.module_list = module_list self.product_code = product_code + self.product_type = product_type + self.subscription_type = subscription_type def validate(self): - if self.instance_properties: - for k in self.instance_properties: - if k: - k.validate() + if self.module_list: + self.module_list.validate() def to_map(self): _map = super().to_map() @@ -2908,176 +10033,40 @@ def to_map(self): return _map result = dict() - if self.auto_renew is not None: - result['AutoRenew'] = self.auto_renew - if self.charge_type is not None: - result['ChargeType'] = self.charge_type - if self.duration is not None: - result['Duration'] = self.duration - result['InstanceProperties'] = [] - if self.instance_properties is not None: - for k in self.instance_properties: - result['InstanceProperties'].append(k.to_map() if k else None) - if self.order_type is not None: - result['OrderType'] = self.order_type - if self.pricing_cycle is not None: - result['PricingCycle'] = self.pricing_cycle + if self.module_list is not None: + result['ModuleList'] = self.module_list.to_map() if self.product_code is not None: result['ProductCode'] = self.product_code + if self.product_type is not None: + result['ProductType'] = self.product_type + if self.subscription_type is not None: + result['SubscriptionType'] = self.subscription_type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AutoRenew') is not None: - self.auto_renew = m.get('AutoRenew') - if m.get('ChargeType') is not None: - self.charge_type = m.get('ChargeType') - if m.get('Duration') is not None: - self.duration = m.get('Duration') - self.instance_properties = [] - if m.get('InstanceProperties') is not None: - for k in m.get('InstanceProperties'): - temp_model = CreateProductOrdersRequestProductsInstanceProperties() - self.instance_properties.append(temp_model.from_map(k)) 
- if m.get('OrderType') is not None: - self.order_type = m.get('OrderType') - if m.get('PricingCycle') is not None: - self.pricing_cycle = m.get('PricingCycle') + if m.get('ModuleList') is not None: + temp_model = GetPayAsYouGoPriceRequestModuleList() + self.module_list = temp_model.from_map(m['ModuleList']) if m.get('ProductCode') is not None: self.product_code = m.get('ProductCode') + if m.get('ProductType') is not None: + self.product_type = m.get('ProductType') + if m.get('SubscriptionType') is not None: + self.subscription_type = m.get('SubscriptionType') return self -class CreateProductOrdersRequest(TeaModel): - def __init__( - self, - auto_pay: bool = None, - products: CreateProductOrdersRequestProducts = None, - ): - self.auto_pay = auto_pay - self.products = products - - def validate(self): - if self.products: - self.products.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.auto_pay is not None: - result['AutoPay'] = self.auto_pay - if self.products is not None: - result['Products'] = self.products.to_map() - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('AutoPay') is not None: - self.auto_pay = m.get('AutoPay') - if m.get('Products') is not None: - temp_model = CreateProductOrdersRequestProducts() - self.products = temp_model.from_map(m['Products']) - return self - - -class CreateProductOrdersResponseBody(TeaModel): - def __init__( - self, - buy_product_request_id: str = None, - message: str = None, - order_id: str = None, - request_id: str = None, - ): - self.buy_product_request_id = buy_product_request_id - self.message = message - self.order_id = order_id - self.request_id = request_id - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.buy_product_request_id is not None: - result['BuyProductRequestId'] = self.buy_product_request_id - if 
self.message is not None: - result['Message'] = self.message - if self.order_id is not None: - result['OrderId'] = self.order_id - if self.request_id is not None: - result['RequestId'] = self.request_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('BuyProductRequestId') is not None: - self.buy_product_request_id = m.get('BuyProductRequestId') - if m.get('Message') is not None: - self.message = m.get('Message') - if m.get('OrderId') is not None: - self.order_id = m.get('OrderId') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - return self - - -class CreateProductOrdersResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: CreateProductOrdersResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = CreateProductOrdersResponseBody() - self.body = temp_model.from_map(m['body']) - return self - - -class CreateServiceIdentityRoleRequest(TeaModel): +class GetPayAsYouGoPriceResponseBodyModuleDetails(TeaModel): def __init__( self, - role_name: str = None, + module_code: str = None, + original_cost: float = None, + 
unit_price: float = None, ): - self.role_name = role_name + self.module_code = module_code + self.original_cost = original_cost + self.unit_price = unit_price def validate(self): pass @@ -3088,26 +10077,41 @@ def to_map(self): return _map result = dict() - if self.role_name is not None: - result['RoleName'] = self.role_name + if self.module_code is not None: + result['ModuleCode'] = self.module_code + if self.original_cost is not None: + result['OriginalCost'] = self.original_cost + if self.unit_price is not None: + result['UnitPrice'] = self.unit_price return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RoleName') is not None: - self.role_name = m.get('RoleName') + if m.get('ModuleCode') is not None: + self.module_code = m.get('ModuleCode') + if m.get('OriginalCost') is not None: + self.original_cost = m.get('OriginalCost') + if m.get('UnitPrice') is not None: + self.unit_price = m.get('UnitPrice') return self -class CreateServiceIdentityRoleResponseBody(TeaModel): +class GetPayAsYouGoPriceResponseBody(TeaModel): def __init__( self, + currency: str = None, + module_details: List[GetPayAsYouGoPriceResponseBodyModuleDetails] = None, request_id: str = None, ): + self.currency = currency + self.module_details = module_details self.request_id = request_id def validate(self): - pass + if self.module_details: + for k in self.module_details: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -3115,32 +10119,42 @@ def to_map(self): return _map result = dict() + if self.currency is not None: + result['Currency'] = self.currency + result['ModuleDetails'] = [] + if self.module_details is not None: + for k in self.module_details: + result['ModuleDetails'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('Currency') is not None: + self.currency = m.get('Currency') + self.module_details = [] + if 
m.get('ModuleDetails') is not None: + for k in m.get('ModuleDetails'): + temp_model = GetPayAsYouGoPriceResponseBodyModuleDetails() + self.module_details.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class CreateServiceIdentityRoleResponse(TeaModel): +class GetPayAsYouGoPriceResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateServiceIdentityRoleResponseBody = None, + body: GetPayAsYouGoPriceResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3165,19 +10179,23 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateServiceIdentityRoleResponseBody() + temp_model = GetPayAsYouGoPriceResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateUserResponseBody(TeaModel): +class GetPermissionRequest(TeaModel): def __init__( self, - request_id: str = None, - user_id: str = None, + accessibility: str = None, + creator: str = None, + option: str = None, + resource: str = None, ): - self.request_id = request_id - self.user_id = user_id + self.accessibility = accessibility + self.creator = creator + self.option = option + self.resource = resource def validate(self): pass @@ -3188,77 +10206,37 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.user_id is not None: - result['UserId'] = self.user_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('UserId') is not None: - 
self.user_id = m.get('UserId') - return self - - -class CreateUserResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: CreateUserResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.creator is not None: + result['Creator'] = self.creator + if self.option is not None: + result['Option'] = self.option + if self.resource is not None: + result['Resource'] = self.resource return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = CreateUserResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('Creator') is not None: + self.creator = m.get('Creator') + if m.get('Option') is not None: + self.option = m.get('Option') + if m.get('Resource') is not None: + self.resource = m.get('Resource') return self -class CreateWorkspaceRequest(TeaModel): +class GetPermissionResponseBodyPermissionRules(TeaModel): def __init__( self, - description: str = None, - display_name: str = None, - env_types: List[str] = None, - workspace_name: str = None, + accessibility: str = 
None, + entity_access_type: str = None, ): - self.description = description - self.display_name = display_name - self.env_types = env_types - self.workspace_name = workspace_name + self.accessibility = accessibility + self.entity_access_type = entity_access_type def validate(self): pass @@ -3269,40 +10247,37 @@ def to_map(self): return _map result = dict() - if self.description is not None: - result['Description'] = self.description - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.env_types is not None: - result['EnvTypes'] = self.env_types - if self.workspace_name is not None: - result['WorkspaceName'] = self.workspace_name + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.entity_access_type is not None: + result['EntityAccessType'] = self.entity_access_type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('EnvTypes') is not None: - self.env_types = m.get('EnvTypes') - if m.get('WorkspaceName') is not None: - self.workspace_name = m.get('WorkspaceName') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('EntityAccessType') is not None: + self.entity_access_type = m.get('EntityAccessType') return self -class CreateWorkspaceResponseBody(TeaModel): +class GetPermissionResponseBody(TeaModel): def __init__( self, + permission_code: str = None, + permission_rules: List[GetPermissionResponseBodyPermissionRules] = None, request_id: str = None, - workspace_id: str = None, ): + self.permission_code = permission_code + self.permission_rules = permission_rules self.request_id = request_id - self.workspace_id = workspace_id def validate(self): - pass + if self.permission_rules: + for k in self.permission_rules: + if k: + k.validate() def to_map(self): _map = 
super().to_map() @@ -3310,36 +10285,42 @@ def to_map(self): return _map result = dict() + if self.permission_code is not None: + result['PermissionCode'] = self.permission_code + result['PermissionRules'] = [] + if self.permission_rules is not None: + for k in self.permission_rules: + result['PermissionRules'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('PermissionCode') is not None: + self.permission_code = m.get('PermissionCode') + self.permission_rules = [] + if m.get('PermissionRules') is not None: + for k in m.get('PermissionRules'): + temp_model = GetPermissionResponseBodyPermissionRules() + self.permission_rules.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') return self -class CreateWorkspaceResponse(TeaModel): +class GetPermissionResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateWorkspaceResponseBody = None, + body: GetPermissionResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3364,20 +10345,98 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateWorkspaceResponseBody() + temp_model = GetPermissionResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateWorkspaceResourceRequestResourcesQuotas(TeaModel): +class GetResourceRequest(TeaModel): + def __init__( + 
self, + resource_type: str = None, + ): + # This parameter is required. + self.resource_type = resource_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + return self + + +class GetResourceResponseBodyQuotasSpecs(TeaModel): + def __init__( + self, + name: str = None, + value: str = None, + ): + self.name = name + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.name is not None: + result['Name'] = self.name + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class GetResourceResponseBodyQuotas(TeaModel): def __init__( self, + card_type: str = None, + display_name: str = None, id: str = None, + mode: str = None, + name: str = None, + product_code: str = None, + quota_type: str = None, + specs: List[GetResourceResponseBodyQuotasSpecs] = None, ): + self.card_type = card_type + self.display_name = display_name self.id = id + self.mode = mode + self.name = name + self.product_code = product_code + self.quota_type = quota_type + self.specs = specs def validate(self): - pass + if self.specs: + for k in self.specs: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -3385,38 +10444,77 @@ def to_map(self): return _map result = dict() + if self.card_type is not None: + result['CardType'] = self.card_type + if self.display_name is not None: + result['DisplayName'] = self.display_name if self.id is 
not None: result['Id'] = self.id + if self.mode is not None: + result['Mode'] = self.mode + if self.name is not None: + result['Name'] = self.name + if self.product_code is not None: + result['ProductCode'] = self.product_code + if self.quota_type is not None: + result['QuotaType'] = self.quota_type + result['Specs'] = [] + if self.specs is not None: + for k in self.specs: + result['Specs'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() + if m.get('CardType') is not None: + self.card_type = m.get('CardType') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') if m.get('Id') is not None: self.id = m.get('Id') + if m.get('Mode') is not None: + self.mode = m.get('Mode') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('ProductCode') is not None: + self.product_code = m.get('ProductCode') + if m.get('QuotaType') is not None: + self.quota_type = m.get('QuotaType') + self.specs = [] + if m.get('Specs') is not None: + for k in m.get('Specs'): + temp_model = GetResourceResponseBodyQuotasSpecs() + self.specs.append(temp_model.from_map(k)) return self -class CreateWorkspaceResourceRequestResources(TeaModel): +class GetResourceResponseBody(TeaModel): def __init__( self, env_type: str = None, + gmt_create_time: str = None, group_name: str = None, + id: str = None, is_default: bool = None, name: str = None, - product_type: str = None, - quotas: List[CreateWorkspaceResourceRequestResourcesQuotas] = None, + quotas: List[GetResourceResponseBodyQuotas] = None, + request_id: str = None, resource_type: str = None, spec: Dict[str, Any] = None, + status: str = None, workspace_id: str = None, ): self.env_type = env_type + self.gmt_create_time = gmt_create_time self.group_name = group_name + self.id = id self.is_default = is_default self.name = name - self.product_type = product_type self.quotas = quotas + self.request_id = request_id self.resource_type = resource_type self.spec = 
spec + self.status = status self.workspace_id = workspace_id def validate(self): @@ -3433,22 +10531,28 @@ def to_map(self): result = dict() if self.env_type is not None: result['EnvType'] = self.env_type + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time if self.group_name is not None: result['GroupName'] = self.group_name + if self.id is not None: + result['Id'] = self.id if self.is_default is not None: result['IsDefault'] = self.is_default if self.name is not None: result['Name'] = self.name - if self.product_type is not None: - result['ProductType'] = self.product_type result['Quotas'] = [] if self.quotas is not None: for k in self.quotas: result['Quotas'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id if self.resource_type is not None: result['ResourceType'] = self.resource_type if self.spec is not None: result['Spec'] = self.spec + if self.status is not None: + result['Status'] = self.status if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result @@ -3457,42 +10561,48 @@ def from_map(self, m: dict = None): m = m or dict() if m.get('EnvType') is not None: self.env_type = m.get('EnvType') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') if m.get('GroupName') is not None: self.group_name = m.get('GroupName') + if m.get('Id') is not None: + self.id = m.get('Id') if m.get('IsDefault') is not None: self.is_default = m.get('IsDefault') if m.get('Name') is not None: self.name = m.get('Name') - if m.get('ProductType') is not None: - self.product_type = m.get('ProductType') self.quotas = [] if m.get('Quotas') is not None: for k in m.get('Quotas'): - temp_model = CreateWorkspaceResourceRequestResourcesQuotas() + temp_model = GetResourceResponseBodyQuotas() self.quotas.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') if m.get('ResourceType') is 
not None: self.resource_type = m.get('ResourceType') if m.get('Spec') is not None: self.spec = m.get('Spec') + if m.get('Status') is not None: + self.status = m.get('Status') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class CreateWorkspaceResourceRequest(TeaModel): +class GetResourceResponse(TeaModel): def __init__( self, - option: str = None, - resources: List[CreateWorkspaceResourceRequestResources] = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetResourceResponseBody = None, ): - self.option = option - self.resources = resources + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - if self.resources: - for k in self.resources: - if k: - k.validate() + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -3500,32 +10610,32 @@ def to_map(self): return _map result = dict() - if self.option is not None: - result['Option'] = self.option - result['Resources'] = [] - if self.resources is not None: - for k in self.resources: - result['Resources'].append(k.to_map() if k else None) + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Option') is not None: - self.option = m.get('Option') - self.resources = [] - if m.get('Resources') is not None: - for k in m.get('Resources'): - temp_model = CreateWorkspaceResourceRequestResources() - self.resources.append(temp_model.from_map(k)) + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetResourceResponseBody() + self.body = temp_model.from_map(m['body']) return self -class 
CreateWorkspaceResourceResponseBodyResources(TeaModel): +class GetRoleStatisticsRequest(TeaModel): def __init__( self, - id: str = None, + workspace_id: str = None, ): - self.id = id + self.workspace_id = workspace_id def validate(self): pass @@ -3536,81 +10646,28 @@ def to_map(self): return _map result = dict() - if self.id is not None: - result['Id'] = self.id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Id') is not None: - self.id = m.get('Id') - return self - - -class CreateWorkspaceResourceResponseBody(TeaModel): - def __init__( - self, - request_id: str = None, - resources: List[CreateWorkspaceResourceResponseBodyResources] = None, - total_count: int = None, - ): - self.request_id = request_id - self.resources = resources - self.total_count = total_count - - def validate(self): - if self.resources: - for k in self.resources: - if k: - k.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id - result['Resources'] = [] - if self.resources is not None: - for k in self.resources: - result['Resources'].append(k.to_map() if k else None) - if self.total_count is not None: - result['TotalCount'] = self.total_count + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - self.resources = [] - if m.get('Resources') is not None: - for k in m.get('Resources'): - temp_model = CreateWorkspaceResourceResponseBodyResources() - self.resources.append(temp_model.from_map(k)) - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class CreateWorkspaceResourceResponse(TeaModel): +class 
GetRoleStatisticsResponseBodyRoles(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: CreateWorkspaceResourceResponseBody = None, + member_size: int = None, + role_name: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.member_size = member_size + self.role_name = role_name def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -3618,37 +10675,37 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.member_size is not None: + result['MemberSize'] = self.member_size + if self.role_name is not None: + result['RoleName'] = self.role_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = CreateWorkspaceResourceResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('MemberSize') is not None: + self.member_size = m.get('MemberSize') + if m.get('RoleName') is not None: + self.role_name = m.get('RoleName') return self -class DeleteCodeSourceResponseBody(TeaModel): +class GetRoleStatisticsResponseBody(TeaModel): def __init__( self, - code_source_id: str = None, request_id: str = None, + roles: List[GetRoleStatisticsResponseBodyRoles] = None, + total_count: int = None, ): - self.code_source_id = code_source_id self.request_id = request_id + self.roles = roles + self.total_count = total_count def validate(self): - pass + if self.roles: + for k 
in self.roles: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -3656,36 +10713,42 @@ def to_map(self): return _map result = dict() - if self.code_source_id is not None: - result['CodeSourceId'] = self.code_source_id if self.request_id is not None: result['RequestId'] = self.request_id + result['Roles'] = [] + if self.roles is not None: + for k in self.roles: + result['Roles'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CodeSourceId') is not None: - self.code_source_id = m.get('CodeSourceId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + self.roles = [] + if m.get('Roles') is not None: + for k in m.get('Roles'): + temp_model = GetRoleStatisticsResponseBodyRoles() + self.roles.append(temp_model.from_map(k)) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class DeleteCodeSourceResponse(TeaModel): +class GetRoleStatisticsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteCodeSourceResponseBody = None, + body: GetRoleStatisticsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3710,20 +10773,45 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteCodeSourceResponseBody() + temp_model = GetRoleStatisticsResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteConfigResponseBody(TeaModel): +class GetServiceTemplateResponseBody(TeaModel): def __init__( self, + gmt_create_time: str = None, + 
gmt_modified_time: str = None, + inference_spec: Dict[str, Any] = None, + labels: List[Label] = None, + owner_id: str = None, + provider: str = None, request_id: str = None, + service_template_description: str = None, + service_template_doc: str = None, + service_template_id: str = None, + service_template_name: str = None, + user_id: str = None, ): + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.inference_spec = inference_spec + self.labels = labels + self.owner_id = owner_id + self.provider = provider self.request_id = request_id + self.service_template_description = service_template_description + self.service_template_doc = service_template_doc + self.service_template_id = service_template_id + self.service_template_name = service_template_name + self.user_id = user_id def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -3731,32 +10819,78 @@ def to_map(self): return _map result = dict() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.owner_id is not None: + result['OwnerId'] = self.owner_id + if self.provider is not None: + result['Provider'] = self.provider if self.request_id is not None: result['RequestId'] = self.request_id + if self.service_template_description is not None: + result['ServiceTemplateDescription'] = self.service_template_description + if self.service_template_doc is not None: + result['ServiceTemplateDoc'] = self.service_template_doc + if self.service_template_id is not None: + result['ServiceTemplateId'] = self.service_template_id + if 
self.service_template_name is not None: + result['ServiceTemplateName'] = self.service_template_name + if self.user_id is not None: + result['UserId'] = self.user_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('InferenceSpec') is not None: + self.inference_spec = m.get('InferenceSpec') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') + if m.get('Provider') is not None: + self.provider = m.get('Provider') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('ServiceTemplateDescription') is not None: + self.service_template_description = m.get('ServiceTemplateDescription') + if m.get('ServiceTemplateDoc') is not None: + self.service_template_doc = m.get('ServiceTemplateDoc') + if m.get('ServiceTemplateId') is not None: + self.service_template_id = m.get('ServiceTemplateId') + if m.get('ServiceTemplateName') is not None: + self.service_template_name = m.get('ServiceTemplateName') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') return self -class DeleteConfigResponse(TeaModel): +class GetServiceTemplateResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteConfigResponseBody = None, + body: GetServiceTemplateResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3781,17 +10915,35 @@ def from_map(self, m: dict = None): if m.get('statusCode') 
is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteConfigResponseBody() + temp_model = GetServiceTemplateResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteDatasetResponseBody(TeaModel): +class GetTrialResponseBody(TeaModel): def __init__( self, + accessibility: str = None, + experiment_id: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + name: str = None, request_id: str = None, + source_id: str = None, + source_type: str = None, + trial_id: str = None, + workspace_id: str = None, ): + self.accessibility = accessibility + self.experiment_id = experiment_id + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.name = name self.request_id = request_id + self.source_id = source_id + self.source_type = source_type + self.trial_id = trial_id + self.workspace_id = workspace_id def validate(self): pass @@ -3802,32 +10954,65 @@ def to_map(self): return _map result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.name is not None: + result['Name'] = self.name if self.request_id is not None: result['RequestId'] = self.request_id + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.trial_id is not None: + result['TrialId'] = self.trial_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('ExperimentId') is not None: + 
self.experiment_id = m.get('ExperimentId') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('Name') is not None: + self.name = m.get('Name') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('TrialId') is not None: + self.trial_id = m.get('TrialId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class DeleteDatasetResponse(TeaModel): +class GetTrialResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteDatasetResponseBody = None, + body: GetTrialResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3852,50 +11037,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteDatasetResponseBody() + temp_model = GetTrialResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteDatasetLabelsRequest(TeaModel): - def __init__( - self, - keys: str = None, - label_keys: str = None, - ): - self.keys = keys - self.label_keys = label_keys - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.keys is not None: - result['Keys'] = self.keys - if self.label_keys is not None: - result['LabelKeys'] = self.label_keys - return result - - def from_map(self, m: dict = None): - m = m or dict() 
- if m.get('Keys') is not None: - self.keys = m.get('Keys') - if m.get('LabelKeys') is not None: - self.label_keys = m.get('LabelKeys') - return self - - -class DeleteDatasetLabelsResponseBody(TeaModel): +class GetWorkspaceRequest(TeaModel): def __init__( self, - request_id: str = None, + verbose: bool = None, ): - self.request_id = request_id + self.verbose = verbose def validate(self): pass @@ -3906,67 +11058,29 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - return self - - -class DeleteDatasetLabelsResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteDatasetLabelsResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.verbose is not None: + result['Verbose'] = self.verbose return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteDatasetLabelsResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') return self -class 
DeleteMembersRequest(TeaModel): +class GetWorkspaceResponseBodyOwner(TeaModel): def __init__( self, - member_ids: str = None, + display_name: str = None, + user_id: str = None, + user_kp: str = None, + user_name: str = None, ): - self.member_ids = member_ids + self.display_name = display_name + self.user_id = user_id + self.user_kp = user_kp + self.user_name = user_name def validate(self): pass @@ -3977,26 +11091,65 @@ def to_map(self): return _map result = dict() - if self.member_ids is not None: - result['MemberIds'] = self.member_ids + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_kp is not None: + result['UserKp'] = self.user_kp + if self.user_name is not None: + result['UserName'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('MemberIds') is not None: - self.member_ids = m.get('MemberIds') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserKp') is not None: + self.user_kp = m.get('UserKp') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') return self -class DeleteMembersResponseBody(TeaModel): +class GetWorkspaceResponseBody(TeaModel): def __init__( self, + admin_names: List[str] = None, + creator: str = None, + description: str = None, + display_name: str = None, + env_types: List[str] = None, + extra_infos: Dict[str, Any] = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + is_default: bool = None, + owner: GetWorkspaceResponseBodyOwner = None, request_id: str = None, + status: str = None, + workspace_id: str = None, + workspace_name: str = None, ): + self.admin_names = admin_names + self.creator = creator + self.description = description + self.display_name = display_name + self.env_types = env_types + self.extra_infos = extra_infos + 
self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.is_default = is_default + self.owner = owner self.request_id = request_id + self.status = status + self.workspace_id = workspace_id + self.workspace_name = workspace_name def validate(self): - pass + if self.owner: + self.owner.validate() def to_map(self): _map = super().to_map() @@ -4004,32 +11157,82 @@ def to_map(self): return _map result = dict() + if self.admin_names is not None: + result['AdminNames'] = self.admin_names + if self.creator is not None: + result['Creator'] = self.creator + if self.description is not None: + result['Description'] = self.description + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.env_types is not None: + result['EnvTypes'] = self.env_types + if self.extra_infos is not None: + result['ExtraInfos'] = self.extra_infos + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.is_default is not None: + result['IsDefault'] = self.is_default + if self.owner is not None: + result['Owner'] = self.owner.to_map() if self.request_id is not None: result['RequestId'] = self.request_id + if self.status is not None: + result['Status'] = self.status + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + if self.workspace_name is not None: + result['WorkspaceName'] = self.workspace_name return result def from_map(self, m: dict = None): m = m or dict() + if m.get('AdminNames') is not None: + self.admin_names = m.get('AdminNames') + if m.get('Creator') is not None: + self.creator = m.get('Creator') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('EnvTypes') is not None: + self.env_types = m.get('EnvTypes') + if m.get('ExtraInfos') is not 
None: + self.extra_infos = m.get('ExtraInfos') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('IsDefault') is not None: + self.is_default = m.get('IsDefault') + if m.get('Owner') is not None: + temp_model = GetWorkspaceResponseBodyOwner() + self.owner = temp_model.from_map(m['Owner']) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + if m.get('WorkspaceName') is not None: + self.workspace_name = m.get('WorkspaceName') return self -class DeleteMembersResponse(TeaModel): +class GetWorkspaceResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteMembersResponseBody = None, + body: GetWorkspaceResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4054,17 +11257,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteMembersResponseBody() + temp_model = GetWorkspaceResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteModelResponseBody(TeaModel): +class GetWorkspaceRoleResponseBodyModulePermissionsPermissionsPermissionRules(TeaModel): def __init__( self, - request_id: str = None, + accessibility: str = None, + entity_access_type: str = None, ): - self.request_id = request_id + self.accessibility = accessibility + self.entity_access_type = entity_access_type def validate(self): pass @@ -4075,34 +11280,35 @@ def 
to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.entity_access_type is not None: + result['EntityAccessType'] = self.entity_access_type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('EntityAccessType') is not None: + self.entity_access_type = m.get('EntityAccessType') return self -class DeleteModelResponse(TeaModel): +class GetWorkspaceRoleResponseBodyModulePermissionsPermissions(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteModelResponseBody = None, + permission_codes: List[str] = None, + permission_rules: List[GetWorkspaceRoleResponseBodyModulePermissionsPermissionsPermissionRules] = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.permission_codes = permission_codes + self.permission_rules = permission_rules def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.permission_rules: + for k in self.permission_rules: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -4110,35 +11316,42 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.permission_codes is not None: + result['PermissionCodes'] = self.permission_codes + result['PermissionRules'] = [] + if self.permission_rules is not None: + for k in self.permission_rules: + 
result['PermissionRules'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteModelResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('PermissionCodes') is not None: + self.permission_codes = m.get('PermissionCodes') + self.permission_rules = [] + if m.get('PermissionRules') is not None: + for k in m.get('PermissionRules'): + temp_model = GetWorkspaceRoleResponseBodyModulePermissionsPermissionsPermissionRules() + self.permission_rules.append(temp_model.from_map(k)) return self -class DeleteModelDomainRequest(TeaModel): +class GetWorkspaceRoleResponseBodyModulePermissions(TeaModel): def __init__( self, - model_task_ids: str = None, + module_name: str = None, + permission_type: str = None, + permissions: List[GetWorkspaceRoleResponseBodyModulePermissionsPermissions] = None, ): - self.model_task_ids = model_task_ids + self.module_name = module_name + self.permission_type = permission_type + self.permissions = permissions def validate(self): - pass + if self.permissions: + for k in self.permissions: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -4146,26 +11359,56 @@ def to_map(self): return _map result = dict() - if self.model_task_ids is not None: - result['ModelTaskIds'] = self.model_task_ids + if self.module_name is not None: + result['ModuleName'] = self.module_name + if self.permission_type is not None: + result['PermissionType'] = self.permission_type + result['Permissions'] = [] + if self.permissions is not None: + for k in self.permissions: + result['Permissions'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ModelTaskIds') is not None: - self.model_task_ids = m.get('ModelTaskIds') + if m.get('ModuleName') is 
not None: + self.module_name = m.get('ModuleName') + if m.get('PermissionType') is not None: + self.permission_type = m.get('PermissionType') + self.permissions = [] + if m.get('Permissions') is not None: + for k in m.get('Permissions'): + temp_model = GetWorkspaceRoleResponseBodyModulePermissionsPermissions() + self.permissions.append(temp_model.from_map(k)) return self -class DeleteModelDomainResponseBody(TeaModel): +class GetWorkspaceRoleResponseBody(TeaModel): def __init__( self, + creator: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + module_permissions: List[GetWorkspaceRoleResponseBodyModulePermissions] = None, request_id: str = None, + role_id: str = None, + role_name: str = None, + status: str = None, ): + self.creator = creator + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.module_permissions = module_permissions self.request_id = request_id + self.role_id = role_id + self.role_name = role_name + self.status = status def validate(self): - pass + if self.module_permissions: + for k in self.module_permissions: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -4173,32 +11416,62 @@ def to_map(self): return _map result = dict() + if self.creator is not None: + result['Creator'] = self.creator + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['ModulePermissions'] = [] + if self.module_permissions is not None: + for k in self.module_permissions: + result['ModulePermissions'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.role_id is not None: + result['RoleId'] = self.role_id + if self.role_name is not None: + result['RoleName'] = self.role_name + if self.status is not None: + result['Status'] = self.status return result def from_map(self, m: dict = None): m = m 
or dict() + if m.get('Creator') is not None: + self.creator = m.get('Creator') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.module_permissions = [] + if m.get('ModulePermissions') is not None: + for k in m.get('ModulePermissions'): + temp_model = GetWorkspaceRoleResponseBodyModulePermissions() + self.module_permissions.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('RoleId') is not None: + self.role_id = m.get('RoleId') + if m.get('RoleName') is not None: + self.role_name = m.get('RoleName') + if m.get('Status') is not None: + self.status = m.get('Status') return self -class DeleteModelDomainResponse(TeaModel): +class GetWorkspaceRoleResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteModelDomainResponseBody = None, + body: GetWorkspaceRoleResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4223,50 +11496,27 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteModelDomainResponseBody() + temp_model = GetWorkspaceRoleResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteModelLabelsRequest(TeaModel): - def __init__( - self, - keys: str = None, - label_keys: str = None, - ): - self.keys = keys - self.label_keys = label_keys - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.keys is not None: - result['Keys'] = self.keys 
- if self.label_keys is not None: - result['LabelKeys'] = self.label_keys - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Keys') is not None: - self.keys = m.get('Keys') - if m.get('LabelKeys') is not None: - self.label_keys = m.get('LabelKeys') - return self - - -class DeleteModelLabelsResponseBody(TeaModel): +class ListCodeSourcesRequest(TeaModel): def __init__( self, - request_id: str = None, + display_name: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + workspace_id: str = None, ): - self.request_id = request_id + self.display_name = display_name + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + self.workspace_id = workspace_id def validate(self): pass @@ -4277,70 +11527,53 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - return self - - -class DeleteModelLabelsResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteModelLabelsResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.display_name is not None: + result['DisplayName'] = 
self.display_name + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteModelLabelsResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class DeleteModelVersionResponseBody(TeaModel): +class ListCodeSourcesResponseBody(TeaModel): def __init__( self, + code_sources: List[CodeSourceItem] = None, request_id: str = None, + total_count: int = None, ): + self.code_sources = code_sources self.request_id = request_id + self.total_count = total_count def validate(self): - pass + if self.code_sources: + for k in self.code_sources: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -4348,32 +11581,42 @@ def to_map(self): return _map result = dict() + result['CodeSources'] = [] + if self.code_sources is not None: + for k in self.code_sources: + result['CodeSources'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count 
return result def from_map(self, m: dict = None): m = m or dict() + self.code_sources = [] + if m.get('CodeSources') is not None: + for k in m.get('CodeSources'): + temp_model = CodeSourceItem() + self.code_sources.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class DeleteModelVersionResponse(TeaModel): +class ListCodeSourcesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteModelVersionResponseBody = None, + body: ListCodeSourcesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4398,19 +11641,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteModelVersionResponseBody() + temp_model = ListCodeSourcesResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteModelVersionLabelsRequest(TeaModel): +class ListCollectionsRequest(TeaModel): def __init__( self, - keys: str = None, - label_keys: str = None, + page_number: int = None, + page_size: int = None, ): - self.keys = keys - self.label_keys = label_keys + self.page_number = page_number + self.page_size = page_size def validate(self): pass @@ -4421,30 +11664,37 @@ def to_map(self): return _map result = dict() - if self.keys is not None: - result['Keys'] = self.keys - if self.label_keys is not None: - result['LabelKeys'] = self.label_keys + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size return result def from_map(self, m: dict 
= None): m = m or dict() - if m.get('Keys') is not None: - self.keys = m.get('Keys') - if m.get('LabelKeys') is not None: - self.label_keys = m.get('LabelKeys') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') return self -class DeleteModelVersionLabelsResponseBody(TeaModel): +class ListCollectionsResponseBody(TeaModel): def __init__( self, + collections: List[Collection] = None, request_id: str = None, + total_count: int = None, ): + self.collections = collections self.request_id = request_id + self.total_count = total_count def validate(self): - pass + if self.collections: + for k in self.collections: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -4452,32 +11702,42 @@ def to_map(self): return _map result = dict() + result['Collections'] = [] + if self.collections is not None: + for k in self.collections: + result['Collections'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() + self.collections = [] + if m.get('Collections') is not None: + for k in m.get('Collections'): + temp_model = Collection() + self.collections.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class DeleteModelVersionLabelsResponse(TeaModel): +class ListCollectionsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteModelVersionLabelsResponseBody = None, + body: ListCollectionsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - 
self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4502,17 +11762,21 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteModelVersionLabelsResponseBody() + temp_model = ListCollectionsResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteWorkspaceResponseBody(TeaModel): +class ListConfigsRequest(TeaModel): def __init__( self, - request_id: str = None, + config_keys: str = None, + labels: str = None, + verbose: str = None, ): - self.request_id = request_id + self.config_keys = config_keys + self.labels = labels + self.verbose = verbose def validate(self): pass @@ -4523,34 +11787,36 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.config_keys is not None: + result['ConfigKeys'] = self.config_keys + if self.labels is not None: + result['Labels'] = self.labels + if self.verbose is not None: + result['Verbose'] = self.verbose return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('ConfigKeys') is not None: + self.config_keys = m.get('ConfigKeys') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') return self -class DeleteWorkspaceResponse(TeaModel): +class ListConfigsResponseBodyConfigsLabels(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteWorkspaceResponseBody = None, + key: str = None, + value: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.key = key + self.value = value def validate(self): - self.validate_required(self.headers, 'headers') - 
self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -4558,41 +11824,39 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteWorkspaceResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class DeleteWorkspaceResourceRequest(TeaModel): +class ListConfigsResponseBodyConfigs(TeaModel): def __init__( self, - group_name: str = None, - option: str = None, - product_type: str = None, - resource_type: str = None, + category_name: str = None, + config_key: str = None, + config_value: str = None, + labels: List[ListConfigsResponseBodyConfigsLabels] = None, ): - self.group_name = group_name - self.option = option - self.product_type = product_type - self.resource_type = resource_type + self.category_name = category_name + self.config_key = config_key + self.config_value = config_value + self.labels = labels def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -4600,38 +11864,50 @@ def to_map(self): return _map result = dict() - if self.group_name is not None: - result['GroupName'] = self.group_name - if self.option is not None: - result['Option'] 
= self.option - if self.product_type is not None: - result['ProductType'] = self.product_type - if self.resource_type is not None: - result['ResourceType'] = self.resource_type + if self.category_name is not None: + result['CategoryName'] = self.category_name + if self.config_key is not None: + result['ConfigKey'] = self.config_key + if self.config_value is not None: + result['ConfigValue'] = self.config_value + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if m.get('GroupName') is not None: - self.group_name = m.get('GroupName') - if m.get('Option') is not None: - self.option = m.get('Option') - if m.get('ProductType') is not None: - self.product_type = m.get('ProductType') - if m.get('ResourceType') is not None: - self.resource_type = m.get('ResourceType') + if m.get('CategoryName') is not None: + self.category_name = m.get('CategoryName') + if m.get('ConfigKey') is not None: + self.config_key = m.get('ConfigKey') + if m.get('ConfigValue') is not None: + self.config_value = m.get('ConfigValue') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = ListConfigsResponseBodyConfigsLabels() + self.labels.append(temp_model.from_map(k)) return self -class DeleteWorkspaceResourceResponseBody(TeaModel): +class ListConfigsResponseBody(TeaModel): def __init__( self, + configs: List[ListConfigsResponseBodyConfigs] = None, request_id: str = None, + total_count: int = None, ): + self.configs = configs self.request_id = request_id + self.total_count = total_count def validate(self): - pass + if self.configs: + for k in self.configs: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -4639,34 +11915,100 @@ def to_map(self): return _map result = dict() + result['Configs'] = [] + if self.configs is not None: + for k in self.configs: + result['Configs'].append(k.to_map() if k 
else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() + self.configs = [] + if m.get('Configs') is not None: + for k in m.get('Configs'): + temp_model = ListConfigsResponseBodyConfigs() + self.configs.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class DeleteWorkspaceResourceResponse(TeaModel): +class ListConfigsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteWorkspaceResourceResponseBody = None, + body: ListConfigsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListConfigsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListDatasetVersionsRequest(TeaModel): + def __init__( + self, + data_sources_types: str = None, + label_keys: str = None, + lable_values: str = None, + order: str = None, + 
page_number: int = None, + page_size: int = None, + properties: str = None, + sort_by: str = None, + source_id: str = None, + source_types: str = None, + ): + self.data_sources_types = data_sources_types + self.label_keys = label_keys + self.lable_values = lable_values + self.order = order + # This parameter is required. + self.page_number = page_number + # This parameter is required. + self.page_size = page_size + self.properties = properties + self.sort_by = sort_by + self.source_id = source_id + self.source_types = source_types + + def validate(self): + pass def to_map(self): _map = super().to_map() @@ -4674,63 +12016,73 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.data_sources_types is not None: + result['DataSourcesTypes'] = self.data_sources_types + if self.label_keys is not None: + result['LabelKeys'] = self.label_keys + if self.lable_values is not None: + result['LableValues'] = self.lable_values + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.properties is not None: + result['Properties'] = self.properties + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_types is not None: + result['SourceTypes'] = self.source_types return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteWorkspaceResourceResponseBody() - self.body = temp_model.from_map(m['body']) + if 
m.get('DataSourcesTypes') is not None: + self.data_sources_types = m.get('DataSourcesTypes') + if m.get('LabelKeys') is not None: + self.label_keys = m.get('LabelKeys') + if m.get('LableValues') is not None: + self.lable_values = m.get('LableValues') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('Properties') is not None: + self.properties = m.get('Properties') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceTypes') is not None: + self.source_types = m.get('SourceTypes') return self -class GetCodeSourceResponseBody(TeaModel): +class ListDatasetVersionsResponseBody(TeaModel): def __init__( self, - accessibility: str = None, - code_branch: str = None, - code_commit: str = None, - code_repo: str = None, - code_repo_access_token: str = None, - code_repo_user_name: str = None, - code_source_id: str = None, - description: str = None, - display_name: str = None, - gmt_create_time: str = None, - gmt_modify_time: str = None, - mount_path: str = None, + dataset_versions: List[DatasetVersion] = None, + page_number: int = None, + page_size: int = None, request_id: str = None, - user_id: str = None, - workspace_id: str = None, + total_count: int = None, ): - self.accessibility = accessibility - self.code_branch = code_branch - self.code_commit = code_commit - self.code_repo = code_repo - self.code_repo_access_token = code_repo_access_token - self.code_repo_user_name = code_repo_user_name - self.code_source_id = code_source_id - self.description = description - self.display_name = display_name - self.gmt_create_time = gmt_create_time - self.gmt_modify_time = gmt_modify_time - self.mount_path = mount_path + self.dataset_versions = dataset_versions + self.page_number = page_number + 
self.page_size = page_size self.request_id = request_id - self.user_id = user_id - self.workspace_id = workspace_id + self.total_count = total_count def validate(self): - pass + if self.dataset_versions: + for k in self.dataset_versions: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -4738,88 +12090,50 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.code_branch is not None: - result['CodeBranch'] = self.code_branch - if self.code_commit is not None: - result['CodeCommit'] = self.code_commit - if self.code_repo is not None: - result['CodeRepo'] = self.code_repo - if self.code_repo_access_token is not None: - result['CodeRepoAccessToken'] = self.code_repo_access_token - if self.code_repo_user_name is not None: - result['CodeRepoUserName'] = self.code_repo_user_name - if self.code_source_id is not None: - result['CodeSourceId'] = self.code_source_id - if self.description is not None: - result['Description'] = self.description - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modify_time is not None: - result['GmtModifyTime'] = self.gmt_modify_time - if self.mount_path is not None: - result['MountPath'] = self.mount_path + result['DatasetVersions'] = [] + if self.dataset_versions is not None: + for k in self.dataset_versions: + result['DatasetVersions'].append(k.to_map() if k else None) + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size if self.request_id is not None: result['RequestId'] = self.request_id - if self.user_id is not None: - result['UserId'] = self.user_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return 
result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('CodeBranch') is not None: - self.code_branch = m.get('CodeBranch') - if m.get('CodeCommit') is not None: - self.code_commit = m.get('CodeCommit') - if m.get('CodeRepo') is not None: - self.code_repo = m.get('CodeRepo') - if m.get('CodeRepoAccessToken') is not None: - self.code_repo_access_token = m.get('CodeRepoAccessToken') - if m.get('CodeRepoUserName') is not None: - self.code_repo_user_name = m.get('CodeRepoUserName') - if m.get('CodeSourceId') is not None: - self.code_source_id = m.get('CodeSourceId') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifyTime') is not None: - self.gmt_modify_time = m.get('GmtModifyTime') - if m.get('MountPath') is not None: - self.mount_path = m.get('MountPath') + self.dataset_versions = [] + if m.get('DatasetVersions') is not None: + for k in m.get('DatasetVersions'): + temp_model = DatasetVersion() + self.dataset_versions.append(temp_model.from_map(k)) + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetCodeSourceResponse(TeaModel): +class ListDatasetVersionsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetCodeSourceResponseBody = None, + body: ListDatasetVersionsResponseBody = None, 
): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4844,16 +12158,40 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetCodeSourceResponseBody() + temp_model = ListDatasetVersionsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetCodeSourcesStatisticsRequest(TeaModel): +class ListDatasetsRequest(TeaModel): def __init__( self, + data_source_types: str = None, + data_types: str = None, + label: str = None, + name: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + properties: str = None, + provider: str = None, + source_dataset_id: str = None, + source_id: str = None, + source_types: str = None, workspace_id: str = None, ): + self.data_source_types = data_source_types + self.data_types = data_types + self.label = label + self.name = name + self.order = order + self.page_number = page_number + self.page_size = page_size + self.properties = properties + self.provider = provider + self.source_dataset_id = source_dataset_id + self.source_id = source_id + self.source_types = source_types self.workspace_id = workspace_id def validate(self): @@ -4865,28 +12203,81 @@ def to_map(self): return _map result = dict() + if self.data_source_types is not None: + result['DataSourceTypes'] = self.data_source_types + if self.data_types is not None: + result['DataTypes'] = self.data_types + if self.label is not None: + result['Label'] = self.label + if self.name is not None: + result['Name'] = self.name + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + 
result['PageSize'] = self.page_size + if self.properties is not None: + result['Properties'] = self.properties + if self.provider is not None: + result['Provider'] = self.provider + if self.source_dataset_id is not None: + result['SourceDatasetId'] = self.source_dataset_id + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_types is not None: + result['SourceTypes'] = self.source_types if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('DataSourceTypes') is not None: + self.data_source_types = m.get('DataSourceTypes') + if m.get('DataTypes') is not None: + self.data_types = m.get('DataTypes') + if m.get('Label') is not None: + self.label = m.get('Label') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('Properties') is not None: + self.properties = m.get('Properties') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('SourceDatasetId') is not None: + self.source_dataset_id = m.get('SourceDatasetId') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceTypes') is not None: + self.source_types = m.get('SourceTypes') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class GetCodeSourcesStatisticsResponseBody(TeaModel): +class ListDatasetsResponseBody(TeaModel): def __init__( self, - count: int = None, + datasets: List[Dataset] = None, request_id: str = None, + total_count: int = None, ): - self.count = count + self.datasets = datasets self.request_id = request_id + self.total_count = total_count def validate(self): - pass + if self.datasets: + for k in self.datasets: + if k: + k.validate() 
def to_map(self): _map = super().to_map() @@ -4894,36 +12285,42 @@ def to_map(self): return _map result = dict() - if self.count is not None: - result['Count'] = self.count + result['Datasets'] = [] + if self.datasets is not None: + for k in self.datasets: + result['Datasets'].append(k.to_map() if k else None) if self.request_id is not None: - result['requestId'] = self.request_id + result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Count') is not None: - self.count = m.get('Count') - if m.get('requestId') is not None: - self.request_id = m.get('requestId') + self.datasets = [] + if m.get('Datasets') is not None: + for k in m.get('Datasets'): + temp_model = Dataset() + self.datasets.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetCodeSourcesStatisticsResponse(TeaModel): +class ListDatasetsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetCodeSourcesStatisticsResponseBody = None, + body: ListDatasetsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4948,59 +12345,32 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetCodeSourcesStatisticsResponseBody() + temp_model = ListDatasetsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetDatasetResponseBody(TeaModel): +class ListExperimentRequest(TeaModel): def __init__( self, - 
accessibility: str = None, - data_source_type: str = None, - data_type: str = None, - dataset_id: str = None, - description: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - labels: List[Label] = None, + labels: str = None, name: str = None, - options: str = None, - owner_id: str = None, - property: str = None, - provider_type: str = None, - request_id: str = None, - source_id: str = None, - source_type: str = None, - uri: str = None, - user_id: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, workspace_id: str = None, ): - self.accessibility = accessibility - self.data_source_type = data_source_type - self.data_type = data_type - self.dataset_id = dataset_id - self.description = description - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time self.labels = labels self.name = name - self.options = options - self.owner_id = owner_id - self.property = property - self.provider_type = provider_type - self.request_id = request_id - self.source_id = source_id - self.source_type = source_type - self.uri = uri - self.user_id = user_id + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by self.workspace_id = workspace_id def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -5008,109 +12378,100 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.data_source_type is not None: - result['DataSourceType'] = self.data_source_type - if self.data_type is not None: - result['DataType'] = self.data_type - if self.dataset_id is not None: - result['DatasetId'] = self.dataset_id - if self.description is not None: - result['Description'] = self.description - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - 
if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - result['Labels'] = [] if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) + result['Labels'] = self.labels if self.name is not None: result['Name'] = self.name - if self.options is not None: - result['Options'] = self.options - if self.owner_id is not None: - result['OwnerId'] = self.owner_id - if self.property is not None: - result['Property'] = self.property - if self.provider_type is not None: - result['ProviderType'] = self.provider_type - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.source_id is not None: - result['SourceId'] = self.source_id - if self.source_type is not None: - result['SourceType'] = self.source_type - if self.uri is not None: - result['Uri'] = self.uri - if self.user_id is not None: - result['UserId'] = self.user_id + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('DataSourceType') is not None: - self.data_source_type = m.get('DataSourceType') - if m.get('DataType') is not None: - self.data_type = m.get('DataType') - if m.get('DatasetId') is not None: - self.dataset_id = m.get('DatasetId') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - self.labels = [] if m.get('Labels') is not None: - for 
k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) + self.labels = m.get('Labels') if m.get('Name') is not None: self.name = m.get('Name') - if m.get('Options') is not None: - self.options = m.get('Options') - if m.get('OwnerId') is not None: - self.owner_id = m.get('OwnerId') - if m.get('Property') is not None: - self.property = m.get('Property') - if m.get('ProviderType') is not None: - self.provider_type = m.get('ProviderType') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('SourceId') is not None: - self.source_id = m.get('SourceId') - if m.get('SourceType') is not None: - self.source_type = m.get('SourceType') - if m.get('Uri') is not None: - self.uri = m.get('Uri') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class GetDatasetResponse(TeaModel): +class ListExperimentResponseBody(TeaModel): + def __init__( + self, + experiments: List[Experiment] = None, + total_count: int = None, + request_id: str = None, + ): + self.experiments = experiments + self.total_count = total_count + self.request_id = request_id + + def validate(self): + if self.experiments: + for k in self.experiments: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Experiments'] = [] + if self.experiments is not None: + for k in self.experiments: + result['Experiments'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count + if self.request_id is not None: + result['requestId'] = 
self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.experiments = [] + if m.get('Experiments') is not None: + for k in m.get('Experiments'): + temp_model = Experiment() + self.experiments.append(temp_model.from_map(k)) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + if m.get('requestId') is not None: + self.request_id = m.get('requestId') + return self + + +class ListExperimentResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetDatasetResponseBody = None, + body: ListExperimentResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5135,17 +12496,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetDatasetResponseBody() + temp_model = ListExperimentResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetDatasetsStatisticsRequest(TeaModel): +class ListFeaturesRequest(TeaModel): def __init__( self, - workspace_id: str = None, + names: str = None, ): - self.workspace_id = workspace_id + self.names = names def validate(self): pass @@ -5156,25 +12517,27 @@ def to_map(self): return _map result = dict() - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.names is not None: + result['Names'] = self.names return result def from_map(self, m: dict = None): m = m or dict() - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('Names') is not None: + self.names = m.get('Names') return self -class GetDatasetsStatisticsResponseBody(TeaModel): +class ListFeaturesResponseBody(TeaModel): def 
__init__( self, - count: int = None, + features: List[str] = None, request_id: str = None, + total_count: int = None, ): - self.count = count + self.features = features self.request_id = request_id + self.total_count = total_count def validate(self): pass @@ -5185,36 +12548,37 @@ def to_map(self): return _map result = dict() - if self.count is not None: - result['Count'] = self.count + if self.features is not None: + result['Features'] = self.features if self.request_id is not None: result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Count') is not None: - self.count = m.get('Count') + if m.get('Features') is not None: + self.features = m.get('Features') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetDatasetsStatisticsResponse(TeaModel): +class ListFeaturesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetDatasetsStatisticsResponseBody = None, + body: ListFeaturesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5239,17 +12603,25 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetDatasetsStatisticsResponseBody() + temp_model = ListFeaturesResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetDefaultWorkspaceRequest(TeaModel): +class ListGlobalPermissionsRequest(TeaModel): def __init__( self, - verbose: bool = None, + module_names: str = None, + operation_type: 
str = None, + page_number: int = None, + page_size: int = None, + resource_types: str = None, ): - self.verbose = verbose + self.module_names = module_names + self.operation_type = operation_type + self.page_number = page_number + self.page_size = page_size + self.resource_types = resource_types def validate(self): pass @@ -5260,27 +12632,41 @@ def to_map(self): return _map result = dict() - if self.verbose is not None: - result['Verbose'] = self.verbose + if self.module_names is not None: + result['ModuleNames'] = self.module_names + if self.operation_type is not None: + result['OperationType'] = self.operation_type + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.resource_types is not None: + result['ResourceTypes'] = self.resource_types return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Verbose') is not None: - self.verbose = m.get('Verbose') + if m.get('ModuleNames') is not None: + self.module_names = m.get('ModuleNames') + if m.get('OperationType') is not None: + self.operation_type = m.get('OperationType') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('ResourceTypes') is not None: + self.resource_types = m.get('ResourceTypes') return self -class GetDefaultWorkspaceResponseBodyConditions(TeaModel): - def __init__( - self, - code: int = None, - message: str = None, - type: str = None, +class ListGlobalPermissionsResponseBodyPermissionsPermissionRules(TeaModel): + def __init__( + self, + accessibility: str = None, + entity_access_type: str = None, ): - self.code = code - self.message = message - self.type = type + self.accessibility = accessibility + self.entity_access_type = entity_access_type def validate(self): pass @@ -5291,38 +12677,35 @@ def to_map(self): return _map result = dict() - if self.code is not 
None: - result['Code'] = self.code - if self.message is not None: - result['Message'] = self.message - if self.type is not None: - result['Type'] = self.type + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.entity_access_type is not None: + result['EntityAccessType'] = self.entity_access_type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Code') is not None: - self.code = m.get('Code') - if m.get('Message') is not None: - self.message = m.get('Message') - if m.get('Type') is not None: - self.type = m.get('Type') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('EntityAccessType') is not None: + self.entity_access_type = m.get('EntityAccessType') return self -class GetDefaultWorkspaceResponseBodyOwner(TeaModel): +class ListGlobalPermissionsResponseBodyPermissions(TeaModel): def __init__( self, - user_id: str = None, - user_kp: str = None, - user_name: str = None, + permission_code: str = None, + permission_rules: List[ListGlobalPermissionsResponseBodyPermissionsPermissionRules] = None, ): - self.user_id = user_id - self.user_kp = user_kp - self.user_name = user_name + self.permission_code = permission_code + self.permission_rules = permission_rules def validate(self): - pass + if self.permission_rules: + for k in self.permission_rules: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -5330,61 +12713,40 @@ def to_map(self): return _map result = dict() - if self.user_id is not None: - result['UserId'] = self.user_id - if self.user_kp is not None: - result['UserKp'] = self.user_kp - if self.user_name is not None: - result['UserName'] = self.user_name + if self.permission_code is not None: + result['PermissionCode'] = self.permission_code + result['PermissionRules'] = [] + if self.permission_rules is not None: + for k in self.permission_rules: + result['PermissionRules'].append(k.to_map() if k else None) return result def 
from_map(self, m: dict = None): m = m or dict() - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('UserKp') is not None: - self.user_kp = m.get('UserKp') - if m.get('UserName') is not None: - self.user_name = m.get('UserName') + if m.get('PermissionCode') is not None: + self.permission_code = m.get('PermissionCode') + self.permission_rules = [] + if m.get('PermissionRules') is not None: + for k in m.get('PermissionRules'): + temp_model = ListGlobalPermissionsResponseBodyPermissionsPermissionRules() + self.permission_rules.append(temp_model.from_map(k)) return self -class GetDefaultWorkspaceResponseBody(TeaModel): +class ListGlobalPermissionsResponseBody(TeaModel): def __init__( self, - conditions: List[GetDefaultWorkspaceResponseBodyConditions] = None, - creator: str = None, - description: str = None, - display_name: str = None, - env_types: List[str] = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - owner: GetDefaultWorkspaceResponseBodyOwner = None, + permissions: List[ListGlobalPermissionsResponseBodyPermissions] = None, request_id: str = None, - status: str = None, - workspace_id: str = None, - workspace_name: str = None, ): - self.conditions = conditions - self.creator = creator - self.description = description - self.display_name = display_name - self.env_types = env_types - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.owner = owner + self.permissions = permissions self.request_id = request_id - self.status = status - self.workspace_id = workspace_id - self.workspace_name = workspace_name def validate(self): - if self.conditions: - for k in self.conditions: + if self.permissions: + for k in self.permissions: if k: k.validate() - if self.owner: - self.owner.validate() def to_map(self): _map = super().to_map() @@ -5392,82 +12754,38 @@ def to_map(self): return _map result = dict() - result['Conditions'] = [] - if self.conditions is not None: - for k in 
self.conditions: - result['Conditions'].append(k.to_map() if k else None) - if self.creator is not None: - result['Creator'] = self.creator - if self.description is not None: - result['Description'] = self.description - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.env_types is not None: - result['EnvTypes'] = self.env_types - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.owner is not None: - result['Owner'] = self.owner.to_map() + result['Permissions'] = [] + if self.permissions is not None: + for k in self.permissions: + result['Permissions'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - if self.status is not None: - result['Status'] = self.status - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id - if self.workspace_name is not None: - result['WorkspaceName'] = self.workspace_name return result def from_map(self, m: dict = None): m = m or dict() - self.conditions = [] - if m.get('Conditions') is not None: - for k in m.get('Conditions'): - temp_model = GetDefaultWorkspaceResponseBodyConditions() - self.conditions.append(temp_model.from_map(k)) - if m.get('Creator') is not None: - self.creator = m.get('Creator') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('EnvTypes') is not None: - self.env_types = m.get('EnvTypes') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('Owner') is not None: - temp_model = GetDefaultWorkspaceResponseBodyOwner() - self.owner = temp_model.from_map(m['Owner']) + self.permissions = [] + if 
m.get('Permissions') is not None: + for k in m.get('Permissions'): + temp_model = ListGlobalPermissionsResponseBodyPermissions() + self.permissions.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - if m.get('WorkspaceName') is not None: - self.workspace_name = m.get('WorkspaceName') return self -class GetDefaultWorkspaceResponse(TeaModel): +class ListGlobalPermissionsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetDefaultWorkspaceResponseBody = None, + body: ListGlobalPermissionsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5492,46 +12810,18 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetDefaultWorkspaceResponseBody() + temp_model = ListGlobalPermissionsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetImageRequest(TeaModel): - def __init__( - self, - verbose: bool = None, - ): - self.verbose = verbose - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.verbose is not None: - result['Verbose'] = self.verbose - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Verbose') is not None: - self.verbose = m.get('Verbose') - return self - - -class GetImageResponseBodyLabels(TeaModel): +class ListImageLabelKeysRequest(TeaModel): def __init__( self, - key: str = None, - value: str = None, + 
label_key_prefixes: str = None, ): - self.key = key - self.value = value + # This parameter is required. + self.label_key_prefixes = label_key_prefixes def validate(self): pass @@ -5542,53 +12832,30 @@ def to_map(self): return _map result = dict() - if self.key is not None: - result['Key'] = self.key - if self.value is not None: - result['Value'] = self.value + if self.label_key_prefixes is not None: + result['LabelKeyPrefixes'] = self.label_key_prefixes return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Key') is not None: - self.key = m.get('Key') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('LabelKeyPrefixes') is not None: + self.label_key_prefixes = m.get('LabelKeyPrefixes') return self -class GetImageResponseBody(TeaModel): +class ListImageLabelKeysResponseBody(TeaModel): def __init__( self, - accessibility: str = None, - description: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - image_uri: str = None, - labels: List[GetImageResponseBodyLabels] = None, - name: str = None, - parent_user_id: str = None, + label_keys: List[Dict[str, List[str]]] = None, request_id: str = None, - user_id: str = None, - workspace_id: str = None, + total_count: int = None, ): - self.accessibility = accessibility - self.description = description - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.image_uri = image_uri - self.labels = labels - self.name = name - self.parent_user_id = parent_user_id + self.label_keys = label_keys self.request_id = request_id - self.user_id = user_id - self.workspace_id = workspace_id + self.total_count = total_count def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -5596,77 +12863,37 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.description is not 
None: - result['Description'] = self.description - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.image_uri is not None: - result['ImageUri'] = self.image_uri - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.name is not None: - result['Name'] = self.name - if self.parent_user_id is not None: - result['ParentUserId'] = self.parent_user_id + if self.label_keys is not None: + result['LabelKeys'] = self.label_keys if self.request_id is not None: result['RequestId'] = self.request_id - if self.user_id is not None: - result['UserId'] = self.user_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('ImageUri') is not None: - self.image_uri = m.get('ImageUri') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = GetImageResponseBodyLabels() - self.labels.append(temp_model.from_map(k)) - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('ParentUserId') is not None: - self.parent_user_id = m.get('ParentUserId') + if m.get('LabelKeys') is not None: + self.label_keys = m.get('LabelKeys') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('WorkspaceId') is not 
None: - self.workspace_id = m.get('WorkspaceId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetImageResponse(TeaModel): +class ListImageLabelKeysResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetImageResponseBody = None, + body: ListImageLabelKeysResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5691,16 +12918,24 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetImageResponseBody() + temp_model = ListImageLabelKeysResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetImagesStatisticsRequest(TeaModel): +class ListImageLabelsRequest(TeaModel): def __init__( self, + image_id: str = None, + label_filter: str = None, + label_keys: str = None, + region: str = None, workspace_id: str = None, ): + self.image_id = image_id + self.label_filter = label_filter + self.label_keys = label_keys + self.region = region self.workspace_id = workspace_id def validate(self): @@ -5712,28 +12947,82 @@ def to_map(self): return _map result = dict() + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.label_filter is not None: + result['LabelFilter'] = self.label_filter + if self.label_keys is not None: + result['LabelKeys'] = self.label_keys + if self.region is not None: + result['Region'] = self.region if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('LabelFilter') is not None: + self.label_filter 
= m.get('LabelFilter') + if m.get('LabelKeys') is not None: + self.label_keys = m.get('LabelKeys') + if m.get('Region') is not None: + self.region = m.get('Region') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class GetImagesStatisticsResponseBody(TeaModel): +class ListImageLabelsResponseBodyLabels(TeaModel): def __init__( self, - count: int = None, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class ListImageLabelsResponseBody(TeaModel): + def __init__( + self, + labels: List[ListImageLabelsResponseBodyLabels] = None, request_id: str = None, + total_count: int = None, ): - self.count = count + self.labels = labels self.request_id = request_id + self.total_count = total_count def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -5741,36 +13030,42 @@ def to_map(self): return _map result = dict() - if self.count is not None: - result['Count'] = self.count + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Count') is not None: - self.count = m.get('Count') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = 
ListImageLabelsResponseBodyLabels() + self.labels.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetImagesStatisticsResponse(TeaModel): +class ListImageLabelsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetImagesStatisticsResponseBody = None, + body: ListImageLabelsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5795,56 +13090,39 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetImagesStatisticsResponseBody() + temp_model = ListImageLabelsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetMemberRequest(TeaModel): - def __init__( - self, - user_id: str = None, - ): - self.user_id = user_id - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.user_id is not None: - result['UserId'] = self.user_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - return self - - -class GetMemberResponseBody(TeaModel): +class ListImagesRequest(TeaModel): def __init__( self, - display_name: str = None, - gmt_create_time: str = None, - member_id: str = None, - member_name: str = None, - request_id: str = None, - roles: List[str] = None, + accessibility: str = None, + labels: str = None, + name: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + parent_user_id: str = 
None, + query: str = None, + sort_by: str = None, user_id: str = None, + verbose: bool = None, + workspace_id: str = None, ): - self.display_name = display_name - self.gmt_create_time = gmt_create_time - self.member_id = member_id - self.member_name = member_name - self.request_id = request_id - self.roles = roles + self.accessibility = accessibility + self.labels = labels + self.name = name + self.order = order + self.page_number = page_number + self.page_size = page_size + self.parent_user_id = parent_user_id + self.query = query + self.sort_by = sort_by self.user_id = user_id + self.verbose = verbose + self.workspace_id = workspace_id def validate(self): pass @@ -5855,58 +13133,72 @@ def to_map(self): return _map result = dict() - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.member_id is not None: - result['MemberId'] = self.member_id - if self.member_name is not None: - result['MemberName'] = self.member_name - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.roles is not None: - result['Roles'] = self.roles + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.labels is not None: + result['Labels'] = self.labels + if self.name is not None: + result['Name'] = self.name + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.parent_user_id is not None: + result['ParentUserId'] = self.parent_user_id + if self.query is not None: + result['Query'] = self.query + if self.sort_by is not None: + result['SortBy'] = self.sort_by if self.user_id is not None: result['UserId'] = self.user_id + if self.verbose is not None: + result['Verbose'] = self.verbose + if self.workspace_id is not None: + 
result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('MemberId') is not None: - self.member_id = m.get('MemberId') - if m.get('MemberName') is not None: - self.member_name = m.get('MemberName') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('Roles') is not None: - self.roles = m.get('Roles') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('ParentUserId') is not None: + self.parent_user_id = m.get('ParentUserId') + if m.get('Query') is not None: + self.query = m.get('Query') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') if m.get('UserId') is not None: self.user_id = m.get('UserId') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetMemberResponse(TeaModel): +class ListImagesResponseBodyImagesLabels(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: GetMemberResponseBody = None, + key: str = None, + value: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.key = key + self.value = value def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - 
self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -5914,62 +13206,47 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = GetMemberResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class GetModelResponseBody(TeaModel): +class ListImagesResponseBodyImages(TeaModel): def __init__( self, accessibility: str = None, - domain: str = None, + description: str = None, gmt_create_time: str = None, gmt_modified_time: str = None, - labels: List[Label] = None, - latest_version: ModelVersion = None, - model_description: str = None, - model_doc: str = None, - model_id: str = None, - model_name: str = None, - origin: str = None, - owner_id: str = None, - provider: str = None, - request_id: str = None, - task: str = None, + image_id: str = None, + image_uri: str = None, + labels: List[ListImagesResponseBodyImagesLabels] = None, + name: str = None, + parent_user_id: str = None, + size: int = None, user_id: str = None, workspace_id: str = None, ): self.accessibility = accessibility - self.domain = domain + self.description = description self.gmt_create_time = gmt_create_time self.gmt_modified_time = gmt_modified_time + self.image_id = image_id + self.image_uri = image_uri self.labels = labels - self.latest_version = latest_version - 
self.model_description = model_description - self.model_doc = model_doc - self.model_id = model_id - self.model_name = model_name - self.origin = origin - self.owner_id = owner_id - self.provider = provider - self.request_id = request_id - self.task = task + self.name = name + self.parent_user_id = parent_user_id + self.size = size self.user_id = user_id self.workspace_id = workspace_id @@ -5978,8 +13255,6 @@ def validate(self): for k in self.labels: if k: k.validate() - if self.latest_version: - self.latest_version.validate() def to_map(self): _map = super().to_map() @@ -5989,36 +13264,26 @@ def to_map(self): result = dict() if self.accessibility is not None: result['Accessibility'] = self.accessibility - if self.domain is not None: - result['Domain'] = self.domain + if self.description is not None: + result['Description'] = self.description if self.gmt_create_time is not None: result['GmtCreateTime'] = self.gmt_create_time if self.gmt_modified_time is not None: result['GmtModifiedTime'] = self.gmt_modified_time + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_uri is not None: + result['ImageUri'] = self.image_uri result['Labels'] = [] if self.labels is not None: for k in self.labels: result['Labels'].append(k.to_map() if k else None) - if self.latest_version is not None: - result['LatestVersion'] = self.latest_version.to_map() - if self.model_description is not None: - result['ModelDescription'] = self.model_description - if self.model_doc is not None: - result['ModelDoc'] = self.model_doc - if self.model_id is not None: - result['ModelId'] = self.model_id - if self.model_name is not None: - result['ModelName'] = self.model_name - if self.origin is not None: - result['Origin'] = self.origin - if self.owner_id is not None: - result['OwnerId'] = self.owner_id - if self.provider is not None: - result['Provider'] = self.provider - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.task is not None: - 
result['Task'] = self.task + if self.name is not None: + result['Name'] = self.name + if self.parent_user_id is not None: + result['ParentUserId'] = self.parent_user_id + if self.size is not None: + result['Size'] = self.size if self.user_id is not None: result['UserId'] = self.user_id if self.workspace_id is not None: @@ -6029,38 +13294,27 @@ def from_map(self, m: dict = None): m = m or dict() if m.get('Accessibility') is not None: self.accessibility = m.get('Accessibility') - if m.get('Domain') is not None: - self.domain = m.get('Domain') + if m.get('Description') is not None: + self.description = m.get('Description') if m.get('GmtCreateTime') is not None: self.gmt_create_time = m.get('GmtCreateTime') if m.get('GmtModifiedTime') is not None: self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageUri') is not None: + self.image_uri = m.get('ImageUri') self.labels = [] if m.get('Labels') is not None: for k in m.get('Labels'): - temp_model = Label() + temp_model = ListImagesResponseBodyImagesLabels() self.labels.append(temp_model.from_map(k)) - if m.get('LatestVersion') is not None: - temp_model = ModelVersion() - self.latest_version = temp_model.from_map(m['LatestVersion']) - if m.get('ModelDescription') is not None: - self.model_description = m.get('ModelDescription') - if m.get('ModelDoc') is not None: - self.model_doc = m.get('ModelDoc') - if m.get('ModelId') is not None: - self.model_id = m.get('ModelId') - if m.get('ModelName') is not None: - self.model_name = m.get('ModelName') - if m.get('Origin') is not None: - self.origin = m.get('Origin') - if m.get('OwnerId') is not None: - self.owner_id = m.get('OwnerId') - if m.get('Provider') is not None: - self.provider = m.get('Provider') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('Task') is not None: - self.task = m.get('Task') + if m.get('Name') is not None: + self.name = m.get('Name') + if 
m.get('ParentUserId') is not None: + self.parent_user_id = m.get('ParentUserId') + if m.get('Size') is not None: + self.size = m.get('Size') if m.get('UserId') is not None: self.user_id = m.get('UserId') if m.get('WorkspaceId') is not None: @@ -6068,196 +13322,65 @@ def from_map(self, m: dict = None): return self -class GetModelResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: GetModelResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = GetModelResponseBody() - self.body = temp_model.from_map(m['body']) - return self - - -class GetModelVersionResponseBody(TeaModel): +class ListImagesResponseBody(TeaModel): def __init__( self, - approval_status: str = None, - format_type: str = None, - framework_type: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - inference_spec: Dict[str, Any] = None, - labels: List[Label] = None, - options: str = None, - owner_id: str = None, + images: List[ListImagesResponseBodyImages] = None, request_id: str = None, - source_id: str = None, - source_type: str = None, - training_spec: Dict[str, Any] = None, - uri: str = 
None, - user_id: str = None, - version_description: str = None, - version_name: str = None, + total_count: int = None, ): - self.approval_status = approval_status - self.format_type = format_type - self.framework_type = framework_type - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.inference_spec = inference_spec - self.labels = labels - self.options = options - self.owner_id = owner_id + self.images = images self.request_id = request_id - self.source_id = source_id - self.source_type = source_type - self.training_spec = training_spec - self.uri = uri - self.user_id = user_id - self.version_description = version_description - self.version_name = version_name + self.total_count = total_count def validate(self): - if self.labels: - for k in self.labels: + if self.images: + for k in self.images: if k: k.validate() def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.approval_status is not None: - result['ApprovalStatus'] = self.approval_status - if self.format_type is not None: - result['FormatType'] = self.format_type - if self.framework_type is not None: - result['FrameworkType'] = self.framework_type - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.inference_spec is not None: - result['InferenceSpec'] = self.inference_spec - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.options is not None: - result['Options'] = self.options - if self.owner_id is not None: - result['OwnerId'] = self.owner_id + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Images'] = [] + if self.images is not None: + for k in self.images: + result['Images'].append(k.to_map() if k else None) if self.request_id is not None: 
result['RequestId'] = self.request_id - if self.source_id is not None: - result['SourceId'] = self.source_id - if self.source_type is not None: - result['SourceType'] = self.source_type - if self.training_spec is not None: - result['TrainingSpec'] = self.training_spec - if self.uri is not None: - result['Uri'] = self.uri - if self.user_id is not None: - result['UserId'] = self.user_id - if self.version_description is not None: - result['VersionDescription'] = self.version_description - if self.version_name is not None: - result['VersionName'] = self.version_name + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ApprovalStatus') is not None: - self.approval_status = m.get('ApprovalStatus') - if m.get('FormatType') is not None: - self.format_type = m.get('FormatType') - if m.get('FrameworkType') is not None: - self.framework_type = m.get('FrameworkType') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('InferenceSpec') is not None: - self.inference_spec = m.get('InferenceSpec') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('Options') is not None: - self.options = m.get('Options') - if m.get('OwnerId') is not None: - self.owner_id = m.get('OwnerId') + self.images = [] + if m.get('Images') is not None: + for k in m.get('Images'): + temp_model = ListImagesResponseBodyImages() + self.images.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('SourceId') is not None: - self.source_id = m.get('SourceId') - if m.get('SourceType') is not None: - self.source_type = m.get('SourceType') - if m.get('TrainingSpec') is not None: - self.training_spec = 
m.get('TrainingSpec') - if m.get('Uri') is not None: - self.uri = m.get('Uri') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('VersionDescription') is not None: - self.version_description = m.get('VersionDescription') - if m.get('VersionName') is not None: - self.version_name = m.get('VersionName') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetModelVersionResponse(TeaModel): +class ListImagesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetModelVersionResponseBody = None, + body: ListImagesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6282,19 +13405,23 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetModelVersionResponseBody() + temp_model = ListImagesResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetPermissionRequest(TeaModel): +class ListMembersRequest(TeaModel): def __init__( self, - accessibility: str = None, - creator: str = None, + member_name: str = None, + page_number: int = None, + page_size: int = None, + roles: str = None, ): - self.accessibility = accessibility - self.creator = creator + self.member_name = member_name + self.page_number = page_number + self.page_size = page_size + self.roles = roles def validate(self): pass @@ -6305,29 +13432,45 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.creator is not None: - result['Creator'] = self.creator + if self.member_name is not None: + result['MemberName'] = self.member_name 
+ if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.roles is not None: + result['Roles'] = self.roles return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('Creator') is not None: - self.creator = m.get('Creator') + if m.get('MemberName') is not None: + self.member_name = m.get('MemberName') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('Roles') is not None: + self.roles = m.get('Roles') return self -class GetPermissionResponseBodyPermissionRules(TeaModel): +class ListMembersResponseBodyMembers(TeaModel): def __init__( self, - accessibility: str = None, - entity_access_type: str = None, + display_name: str = None, + gmt_create_time: str = None, + member_id: str = None, + member_name: str = None, + roles: List[str] = None, + user_id: str = None, ): - self.accessibility = accessibility - self.entity_access_type = entity_access_type + self.display_name = display_name + self.gmt_create_time = gmt_create_time + self.member_id = member_id + self.member_name = member_name + self.roles = roles + self.user_id = user_id def validate(self): pass @@ -6338,35 +13481,51 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.entity_access_type is not None: - result['EntityAccessType'] = self.entity_access_type + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.member_id is not None: + result['MemberId'] = self.member_id + if self.member_name is not None: + result['MemberName'] = self.member_name + if self.roles is not None: + result['Roles'] 
= self.roles + if self.user_id is not None: + result['UserId'] = self.user_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('EntityAccessType') is not None: - self.entity_access_type = m.get('EntityAccessType') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('MemberId') is not None: + self.member_id = m.get('MemberId') + if m.get('MemberName') is not None: + self.member_name = m.get('MemberName') + if m.get('Roles') is not None: + self.roles = m.get('Roles') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') return self -class GetPermissionResponseBody(TeaModel): +class ListMembersResponseBody(TeaModel): def __init__( self, - permission_code: str = None, - permission_rules: List[GetPermissionResponseBodyPermissionRules] = None, + members: List[ListMembersResponseBodyMembers] = None, request_id: str = None, + total_count: int = None, ): - self.permission_code = permission_code - self.permission_rules = permission_rules + self.members = members self.request_id = request_id + self.total_count = total_count def validate(self): - if self.permission_rules: - for k in self.permission_rules: + if self.members: + for k in self.members: if k: k.validate() @@ -6376,45 +13535,42 @@ def to_map(self): return _map result = dict() - if self.permission_code is not None: - result['PermissionCode'] = self.permission_code - result['PermissionRules'] = [] - if self.permission_rules is not None: - for k in self.permission_rules: - result['PermissionRules'].append(k.to_map() if k else None) + result['Members'] = [] + if self.members is not None: + for k in self.members: + result['Members'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.total_count is not 
None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('PermissionCode') is not None: - self.permission_code = m.get('PermissionCode') - self.permission_rules = [] - if m.get('PermissionRules') is not None: - for k in m.get('PermissionRules'): - temp_model = GetPermissionResponseBodyPermissionRules() - self.permission_rules.append(temp_model.from_map(k)) + self.members = [] + if m.get('Members') is not None: + for k in m.get('Members'): + temp_model = ListMembersResponseBodyMembers() + self.members.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetPermissionResponse(TeaModel): +class ListMembersResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetPermissionResponseBody = None, + body: ListMembersResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6439,17 +13595,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetPermissionResponseBody() + temp_model = ListMembersResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetRoleStatisticsRequest(TeaModel): +class ListModelDomainsRequest(TeaModel): def __init__( self, - workspace_id: str = None, + model_domain_ids: str = None, ): - self.workspace_id = workspace_id + self.model_domain_ids = model_domain_ids def validate(self): pass @@ -6460,28 +13616,86 @@ def to_map(self): return _map result = dict() - if self.workspace_id is not None: - result['WorkspaceId'] = 
self.workspace_id + if self.model_domain_ids is not None: + result['ModelDomainIds'] = self.model_domain_ids + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ModelDomainIds') is not None: + self.model_domain_ids = m.get('ModelDomainIds') + return self + + +class ListModelDomainsResponseBodyModelDomainsModelTasks(TeaModel): + def __init__( + self, + model_domain_id: str = None, + model_task_id: str = None, + model_task_name: str = None, + order_number: int = None, + search_words: str = None, + ): + self.model_domain_id = model_domain_id + self.model_task_id = model_task_id + self.model_task_name = model_task_name + self.order_number = order_number + self.search_words = search_words + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.model_domain_id is not None: + result['ModelDomainId'] = self.model_domain_id + if self.model_task_id is not None: + result['ModelTaskId'] = self.model_task_id + if self.model_task_name is not None: + result['ModelTaskName'] = self.model_task_name + if self.order_number is not None: + result['OrderNumber'] = self.order_number + if self.search_words is not None: + result['SearchWords'] = self.search_words return result def from_map(self, m: dict = None): m = m or dict() - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('ModelDomainId') is not None: + self.model_domain_id = m.get('ModelDomainId') + if m.get('ModelTaskId') is not None: + self.model_task_id = m.get('ModelTaskId') + if m.get('ModelTaskName') is not None: + self.model_task_name = m.get('ModelTaskName') + if m.get('OrderNumber') is not None: + self.order_number = m.get('OrderNumber') + if m.get('SearchWords') is not None: + self.search_words = m.get('SearchWords') return self -class GetRoleStatisticsResponseBodyRoles(TeaModel): +class ListModelDomainsResponseBodyModelDomains(TeaModel): def __init__( self, - 
member_size: int = None, - role_name: str = None, + model_domain_id: str = None, + model_domain_name: str = None, + model_tasks: List[ListModelDomainsResponseBodyModelDomainsModelTasks] = None, + order_number: int = None, ): - self.member_size = member_size - self.role_name = role_name + self.model_domain_id = model_domain_id + self.model_domain_name = model_domain_name + self.model_tasks = model_tasks + self.order_number = order_number def validate(self): - pass + if self.model_tasks: + for k in self.model_tasks: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -6489,35 +13703,48 @@ def to_map(self): return _map result = dict() - if self.member_size is not None: - result['MemberSize'] = self.member_size - if self.role_name is not None: - result['RoleName'] = self.role_name + if self.model_domain_id is not None: + result['ModelDomainId'] = self.model_domain_id + if self.model_domain_name is not None: + result['ModelDomainName'] = self.model_domain_name + result['ModelTasks'] = [] + if self.model_tasks is not None: + for k in self.model_tasks: + result['ModelTasks'].append(k.to_map() if k else None) + if self.order_number is not None: + result['OrderNumber'] = self.order_number return result def from_map(self, m: dict = None): m = m or dict() - if m.get('MemberSize') is not None: - self.member_size = m.get('MemberSize') - if m.get('RoleName') is not None: - self.role_name = m.get('RoleName') + if m.get('ModelDomainId') is not None: + self.model_domain_id = m.get('ModelDomainId') + if m.get('ModelDomainName') is not None: + self.model_domain_name = m.get('ModelDomainName') + self.model_tasks = [] + if m.get('ModelTasks') is not None: + for k in m.get('ModelTasks'): + temp_model = ListModelDomainsResponseBodyModelDomainsModelTasks() + self.model_tasks.append(temp_model.from_map(k)) + if m.get('OrderNumber') is not None: + self.order_number = m.get('OrderNumber') return self -class GetRoleStatisticsResponseBody(TeaModel): +class 
ListModelDomainsResponseBody(TeaModel): def __init__( self, + model_domains: List[ListModelDomainsResponseBodyModelDomains] = None, request_id: str = None, - roles: List[GetRoleStatisticsResponseBodyRoles] = None, total_count: int = None, ): + self.model_domains = model_domains self.request_id = request_id - self.roles = roles self.total_count = total_count def validate(self): - if self.roles: - for k in self.roles: + if self.model_domains: + for k in self.model_domains: if k: k.validate() @@ -6527,45 +13754,42 @@ def to_map(self): return _map result = dict() + result['ModelDomains'] = [] + if self.model_domains is not None: + for k in self.model_domains: + result['ModelDomains'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - result['Roles'] = [] - if self.roles is not None: - for k in self.roles: - result['Roles'].append(k.to_map() if k else None) if self.total_count is not None: result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() + self.model_domains = [] + if m.get('ModelDomains') is not None: + for k in m.get('ModelDomains'): + temp_model = ListModelDomainsResponseBodyModelDomains() + self.model_domains.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - self.roles = [] - if m.get('Roles') is not None: - for k in m.get('Roles'): - temp_model = GetRoleStatisticsResponseBodyRoles() - self.roles.append(temp_model.from_map(k)) if m.get('TotalCount') is not None: self.total_count = m.get('TotalCount') return self -class GetRoleStatisticsResponse(TeaModel): +class ListModelDomainsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetRoleStatisticsResponseBody = None, + body: ListModelDomainsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 
'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6590,50 +13814,41 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetRoleStatisticsResponseBody() + temp_model = ListModelDomainsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetWorkspaceRequest(TeaModel): - def __init__( - self, - verbose: bool = None, - ): - self.verbose = verbose - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.verbose is not None: - result['Verbose'] = self.verbose - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Verbose') is not None: - self.verbose = m.get('Verbose') - return self - - -class GetWorkspaceResponseBodyOwner(TeaModel): +class ListModelVersionsRequest(TeaModel): def __init__( self, - display_name: str = None, - user_id: str = None, - user_kp: str = None, - user_name: str = None, + approval_status: str = None, + format_type: str = None, + framework_type: str = None, + label: str = None, + label_string: str = None, + labels: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + source_id: str = None, + source_type: str = None, + version_name: str = None, ): - self.display_name = display_name - self.user_id = user_id - self.user_kp = user_kp - self.user_name = user_name + self.approval_status = approval_status + self.format_type = format_type + self.framework_type = framework_type + self.label = label + self.label_string = label_string + self.labels = labels + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + self.source_id = source_id + self.source_type = source_type + self.version_name = version_name 
def validate(self): pass @@ -6644,65 +13859,81 @@ def to_map(self): return _map result = dict() - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.user_id is not None: - result['UserId'] = self.user_id - if self.user_kp is not None: - result['UserKp'] = self.user_kp - if self.user_name is not None: - result['UserName'] = self.user_name + if self.approval_status is not None: + result['ApprovalStatus'] = self.approval_status + if self.format_type is not None: + result['FormatType'] = self.format_type + if self.framework_type is not None: + result['FrameworkType'] = self.framework_type + if self.label is not None: + result['Label'] = self.label + if self.label_string is not None: + result['LabelString'] = self.label_string + if self.labels is not None: + result['Labels'] = self.labels + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.version_name is not None: + result['VersionName'] = self.version_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('UserKp') is not None: - self.user_kp = m.get('UserKp') - if m.get('UserName') is not None: - self.user_name = m.get('UserName') + if m.get('ApprovalStatus') is not None: + self.approval_status = m.get('ApprovalStatus') + if m.get('FormatType') is not None: + self.format_type = m.get('FormatType') + if m.get('FrameworkType') is not None: + self.framework_type = m.get('FrameworkType') + if m.get('Label') is not None: + self.label = 
m.get('Label') + if m.get('LabelString') is not None: + self.label_string = m.get('LabelString') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('VersionName') is not None: + self.version_name = m.get('VersionName') return self -class GetWorkspaceResponseBody(TeaModel): +class ListModelVersionsResponseBody(TeaModel): def __init__( self, - admin_names: List[str] = None, - creator: str = None, - description: str = None, - display_name: str = None, - env_types: List[str] = None, - extra_infos: Dict[str, Any] = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - is_default: bool = None, - owner: GetWorkspaceResponseBodyOwner = None, request_id: str = None, - status: str = None, - workspace_id: str = None, - workspace_name: str = None, - ): - self.admin_names = admin_names - self.creator = creator - self.description = description - self.display_name = display_name - self.env_types = env_types - self.extra_infos = extra_infos - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.is_default = is_default - self.owner = owner + total_count: int = None, + versions: List[ModelVersion] = None, + ): self.request_id = request_id - self.status = status - self.workspace_id = workspace_id - self.workspace_name = workspace_name + self.total_count = total_count + self.versions = versions def validate(self): - if self.owner: - self.owner.validate() + if self.versions: + for k in self.versions: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -6710,85 
+13941,42 @@ def to_map(self): return _map result = dict() - if self.admin_names is not None: - result['AdminNames'] = self.admin_names - if self.creator is not None: - result['Creator'] = self.creator - if self.description is not None: - result['Description'] = self.description - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.env_types is not None: - result['EnvTypes'] = self.env_types - if self.extra_infos is not None: - result['ExtraInfos'] = self.extra_infos - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.is_default is not None: - result['IsDefault'] = self.is_default - if self.owner is not None: - result['Owner'] = self.owner.to_map() if self.request_id is not None: result['RequestId'] = self.request_id - if self.status is not None: - result['Status'] = self.status - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id - if self.workspace_name is not None: - result['WorkspaceName'] = self.workspace_name + if self.total_count is not None: + result['TotalCount'] = self.total_count + result['Versions'] = [] + if self.versions is not None: + for k in self.versions: + result['Versions'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AdminNames') is not None: - self.admin_names = m.get('AdminNames') - if m.get('Creator') is not None: - self.creator = m.get('Creator') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('EnvTypes') is not None: - self.env_types = m.get('EnvTypes') - if m.get('ExtraInfos') is not None: - self.extra_infos = m.get('ExtraInfos') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is 
not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('IsDefault') is not None: - self.is_default = m.get('IsDefault') - if m.get('Owner') is not None: - temp_model = GetWorkspaceResponseBodyOwner() - self.owner = temp_model.from_map(m['Owner']) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - if m.get('WorkspaceName') is not None: - self.workspace_name = m.get('WorkspaceName') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + self.versions = [] + if m.get('Versions') is not None: + for k in m.get('Versions'): + temp_model = ModelVersion() + self.versions.append(temp_model.from_map(k)) return self -class GetWorkspaceResponse(TeaModel): +class ListModelVersionsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetWorkspaceResponseBody = None, + body: ListModelVersionsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6813,26 +14001,46 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetWorkspaceResponseBody() + temp_model = ListModelVersionsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListCodeSourcesRequest(TeaModel): +class ListModelsRequest(TeaModel): def __init__( self, - display_name: str = None, + collections: str = None, + domain: str = None, + label: str = None, + label_string: str = None, + labels: str = None, + model_name: str = None, + model_type: str = None, order: str = None, + origin: str = 
None, page_number: int = None, page_size: int = None, + provider: str = None, + query: str = None, sort_by: str = None, + task: str = None, workspace_id: str = None, ): - self.display_name = display_name + self.collections = collections + self.domain = domain + self.label = label + self.label_string = label_string + self.labels = labels + self.model_name = model_name + self.model_type = model_type self.order = order + self.origin = origin self.page_number = page_number self.page_size = page_size + self.provider = provider + self.query = query self.sort_by = sort_by + self.task = task self.workspace_id = workspace_id def validate(self): @@ -6844,51 +14052,91 @@ def to_map(self): return _map result = dict() - if self.display_name is not None: - result['DisplayName'] = self.display_name + if self.collections is not None: + result['Collections'] = self.collections + if self.domain is not None: + result['Domain'] = self.domain + if self.label is not None: + result['Label'] = self.label + if self.label_string is not None: + result['LabelString'] = self.label_string + if self.labels is not None: + result['Labels'] = self.labels + if self.model_name is not None: + result['ModelName'] = self.model_name + if self.model_type is not None: + result['ModelType'] = self.model_type if self.order is not None: result['Order'] = self.order + if self.origin is not None: + result['Origin'] = self.origin if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size + if self.provider is not None: + result['Provider'] = self.provider + if self.query is not None: + result['Query'] = self.query if self.sort_by is not None: result['SortBy'] = self.sort_by + if self.task is not None: + result['Task'] = self.task if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DisplayName') is not None: - self.display_name = 
m.get('DisplayName') + if m.get('Collections') is not None: + self.collections = m.get('Collections') + if m.get('Domain') is not None: + self.domain = m.get('Domain') + if m.get('Label') is not None: + self.label = m.get('Label') + if m.get('LabelString') is not None: + self.label_string = m.get('LabelString') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('ModelName') is not None: + self.model_name = m.get('ModelName') + if m.get('ModelType') is not None: + self.model_type = m.get('ModelType') if m.get('Order') is not None: self.order = m.get('Order') + if m.get('Origin') is not None: + self.origin = m.get('Origin') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: self.page_size = m.get('PageSize') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('Query') is not None: + self.query = m.get('Query') if m.get('SortBy') is not None: self.sort_by = m.get('SortBy') + if m.get('Task') is not None: + self.task = m.get('Task') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class ListCodeSourcesResponseBody(TeaModel): +class ListModelsResponseBody(TeaModel): def __init__( self, - code_sources: List[CodeSourceItem] = None, + models: List[Model] = None, request_id: str = None, total_count: int = None, ): - self.code_sources = code_sources + self.models = models self.request_id = request_id self.total_count = total_count def validate(self): - if self.code_sources: - for k in self.code_sources: + if self.models: + for k in self.models: if k: k.validate() @@ -6898,10 +14146,10 @@ def to_map(self): return _map result = dict() - result['CodeSources'] = [] - if self.code_sources is not None: - for k in self.code_sources: - result['CodeSources'].append(k.to_map() if k else None) + result['Models'] = [] + if self.models is not None: + for k in self.models: + result['Models'].append(k.to_map() if k else None) if 
self.request_id is not None: result['RequestId'] = self.request_id if self.total_count is not None: @@ -6910,11 +14158,11 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - self.code_sources = [] - if m.get('CodeSources') is not None: - for k in m.get('CodeSources'): - temp_model = CodeSourceItem() - self.code_sources.append(temp_model.from_map(k)) + self.models = [] + if m.get('Models') is not None: + for k in m.get('Models'): + temp_model = Model() + self.models.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') if m.get('TotalCount') is not None: @@ -6922,21 +14170,18 @@ def from_map(self, m: dict = None): return self -class ListCodeSourcesResponse(TeaModel): +class ListModelsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListCodeSourcesResponseBody = None, + body: ListModelsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6961,17 +14206,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListCodeSourcesResponseBody() + temp_model = ListModelsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListConfigsRequest(TeaModel): +class ListModuleConfigsRequest(TeaModel): def __init__( self, - config_keys: str = None, + module_codes: str = None, + region: str = None, ): - self.config_keys = config_keys + self.module_codes = module_codes + self.region = region def validate(self): pass @@ -6982,25 +14229,29 @@ def to_map(self): return _map result = dict() - if self.config_keys is not None: - result['ConfigKeys'] = self.config_keys + if self.module_codes 
is not None: + result['ModuleCodes'] = self.module_codes + if self.region is not None: + result['Region'] = self.region return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ConfigKeys') is not None: - self.config_keys = m.get('ConfigKeys') + if m.get('ModuleCodes') is not None: + self.module_codes = m.get('ModuleCodes') + if m.get('Region') is not None: + self.region = m.get('Region') return self -class ListConfigsResponseBodyConfigs(TeaModel): +class ListModuleConfigsResponseBodyModuleConfigsConfigs(TeaModel): def __init__( self, - config_key: str = None, - config_value: str = None, + key: str = None, + value: str = None, ): - self.config_key = config_key - self.config_value = config_value + self.key = key + self.value = value def validate(self): pass @@ -7011,35 +14262,82 @@ def to_map(self): return _map result = dict() - if self.config_key is not None: - result['ConfigKey'] = self.config_key - if self.config_value is not None: - result['ConfigValue'] = self.config_value + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class ListModuleConfigsResponseBodyModuleConfigs(TeaModel): + def __init__( + self, + configs: List[ListModuleConfigsResponseBodyModuleConfigsConfigs] = None, + module_code: str = None, + region: str = None, + ): + self.configs = configs + self.module_code = module_code + self.region = region + + def validate(self): + if self.configs: + for k in self.configs: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Configs'] = [] + if self.configs is not None: + for k in self.configs: + result['Configs'].append(k.to_map() if k else None) + if self.module_code is not None: 
+ result['ModuleCode'] = self.module_code + if self.region is not None: + result['Region'] = self.region return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ConfigKey') is not None: - self.config_key = m.get('ConfigKey') - if m.get('ConfigValue') is not None: - self.config_value = m.get('ConfigValue') + self.configs = [] + if m.get('Configs') is not None: + for k in m.get('Configs'): + temp_model = ListModuleConfigsResponseBodyModuleConfigsConfigs() + self.configs.append(temp_model.from_map(k)) + if m.get('ModuleCode') is not None: + self.module_code = m.get('ModuleCode') + if m.get('Region') is not None: + self.region = m.get('Region') return self -class ListConfigsResponseBody(TeaModel): +class ListModuleConfigsResponseBody(TeaModel): def __init__( self, - configs: List[ListConfigsResponseBodyConfigs] = None, + module_configs: List[ListModuleConfigsResponseBodyModuleConfigs] = None, request_id: str = None, total_count: int = None, ): - self.configs = configs + self.module_configs = module_configs self.request_id = request_id self.total_count = total_count def validate(self): - if self.configs: - for k in self.configs: + if self.module_configs: + for k in self.module_configs: if k: k.validate() @@ -7049,10 +14347,10 @@ def to_map(self): return _map result = dict() - result['Configs'] = [] - if self.configs is not None: - for k in self.configs: - result['Configs'].append(k.to_map() if k else None) + result['ModuleConfigs'] = [] + if self.module_configs is not None: + for k in self.module_configs: + result['ModuleConfigs'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id if self.total_count is not None: @@ -7061,11 +14359,11 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - self.configs = [] - if m.get('Configs') is not None: - for k in m.get('Configs'): - temp_model = ListConfigsResponseBodyConfigs() - self.configs.append(temp_model.from_map(k)) + 
self.module_configs = [] + if m.get('ModuleConfigs') is not None: + for k in m.get('ModuleConfigs'): + temp_model = ListModuleConfigsResponseBodyModuleConfigs() + self.module_configs.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') if m.get('TotalCount') is not None: @@ -7073,21 +14371,18 @@ def from_map(self, m: dict = None): return self -class ListConfigsResponse(TeaModel): +class ListModuleConfigsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListConfigsResponseBody = None, + body: ListModuleConfigsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7112,41 +14407,29 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListConfigsResponseBody() + temp_model = ListModuleConfigsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListDatasetsRequest(TeaModel): +class ListOperationLogsRequest(TeaModel): def __init__( self, - data_source_types: str = None, - data_types: str = None, - label: str = None, - label_keys: str = None, - label_values: str = None, - name: str = None, + entity_status: str = None, + entity_types: str = None, + operations: str = None, order: str = None, page_number: int = None, page_size: int = None, - properties: str = None, - source_id: str = None, - source_types: str = None, - workspace_id: str = None, + sort_by: str = None, ): - self.data_source_types = data_source_types - self.data_types = data_types - self.label = label - self.label_keys = label_keys - self.label_values = label_values - self.name = name + self.entity_status = entity_status + 
self.entity_types = entity_types + self.operations = operations self.order = order self.page_number = page_number self.page_size = page_size - self.properties = properties - self.source_id = source_id - self.source_types = source_types - self.workspace_id = workspace_id + self.sort_by = sort_by def validate(self): pass @@ -7157,79 +14440,118 @@ def to_map(self): return _map result = dict() - if self.data_source_types is not None: - result['DataSourceTypes'] = self.data_source_types - if self.data_types is not None: - result['DataTypes'] = self.data_types - if self.label is not None: - result['Label'] = self.label - if self.label_keys is not None: - result['LabelKeys'] = self.label_keys - if self.label_values is not None: - result['LabelValues'] = self.label_values - if self.name is not None: - result['Name'] = self.name + if self.entity_status is not None: + result['EntityStatus'] = self.entity_status + if self.entity_types is not None: + result['EntityTypes'] = self.entity_types + if self.operations is not None: + result['Operations'] = self.operations if self.order is not None: result['Order'] = self.order if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size - if self.properties is not None: - result['Properties'] = self.properties - if self.source_id is not None: - result['SourceId'] = self.source_id - if self.source_types is not None: - result['SourceTypes'] = self.source_types - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.sort_by is not None: + result['SortBy'] = self.sort_by return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DataSourceTypes') is not None: - self.data_source_types = m.get('DataSourceTypes') - if m.get('DataTypes') is not None: - self.data_types = m.get('DataTypes') - if m.get('Label') is not None: - self.label = m.get('Label') - if m.get('LabelKeys') is not None: - self.label_keys = 
m.get('LabelKeys') - if m.get('LabelValues') is not None: - self.label_values = m.get('LabelValues') - if m.get('Name') is not None: - self.name = m.get('Name') + if m.get('EntityStatus') is not None: + self.entity_status = m.get('EntityStatus') + if m.get('EntityTypes') is not None: + self.entity_types = m.get('EntityTypes') + if m.get('Operations') is not None: + self.operations = m.get('Operations') if m.get('Order') is not None: self.order = m.get('Order') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: self.page_size = m.get('PageSize') - if m.get('Properties') is not None: - self.properties = m.get('Properties') - if m.get('SourceId') is not None: - self.source_id = m.get('SourceId') - if m.get('SourceTypes') is not None: - self.source_types = m.get('SourceTypes') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') return self -class ListDatasetsResponseBody(TeaModel): +class ListOperationLogsResponseBodyLogs(TeaModel): def __init__( self, - datasets: List[Dataset] = None, + entity_id: str = None, + entity_status: str = None, + entity_type: str = None, + gmt_create_time: str = None, + message: str = None, + operation: str = None, + operator: str = None, + ): + self.entity_id = entity_id + self.entity_status = entity_status + self.entity_type = entity_type + self.gmt_create_time = gmt_create_time + self.message = message + self.operation = operation + self.operator = operator + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.entity_id is not None: + result['EntityId'] = self.entity_id + if self.entity_status is not None: + result['EntityStatus'] = self.entity_status + if self.entity_type is not None: + result['EntityType'] = self.entity_type + if self.gmt_create_time is not None: + result['GmtCreateTime'] = 
self.gmt_create_time + if self.message is not None: + result['Message'] = self.message + if self.operation is not None: + result['Operation'] = self.operation + if self.operator is not None: + result['Operator'] = self.operator + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('EntityId') is not None: + self.entity_id = m.get('EntityId') + if m.get('EntityStatus') is not None: + self.entity_status = m.get('EntityStatus') + if m.get('EntityType') is not None: + self.entity_type = m.get('EntityType') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('Operation') is not None: + self.operation = m.get('Operation') + if m.get('Operator') is not None: + self.operator = m.get('Operator') + return self + + +class ListOperationLogsResponseBody(TeaModel): + def __init__( + self, + logs: List[ListOperationLogsResponseBodyLogs] = None, request_id: str = None, total_count: int = None, ): - self.datasets = datasets + self.logs = logs self.request_id = request_id self.total_count = total_count def validate(self): - if self.datasets: - for k in self.datasets: + if self.logs: + for k in self.logs: if k: k.validate() @@ -7239,10 +14561,10 @@ def to_map(self): return _map result = dict() - result['Datasets'] = [] - if self.datasets is not None: - for k in self.datasets: - result['Datasets'].append(k.to_map() if k else None) + result['Logs'] = [] + if self.logs is not None: + for k in self.logs: + result['Logs'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id if self.total_count is not None: @@ -7251,11 +14573,11 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - self.datasets = [] - if m.get('Datasets') is not None: - for k in m.get('Datasets'): - temp_model = Dataset() - self.datasets.append(temp_model.from_map(k)) + self.logs = [] + if m.get('Logs') is 
not None: + for k in m.get('Logs'): + temp_model = ListOperationLogsResponseBodyLogs() + self.logs.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') if m.get('TotalCount') is not None: @@ -7263,21 +14585,18 @@ def from_map(self, m: dict = None): return self -class ListDatasetsResponse(TeaModel): +class ListOperationLogsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListDatasetsResponseBody = None, + body: ListOperationLogsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7302,20 +14621,58 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListDatasetsResponseBody() + temp_model = ListOperationLogsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListFeaturesRequest(TeaModel): +class ListPermissionsResponseBodyPermissionsPermissionRules(TeaModel): + def __init__( + self, + accessibility: str = None, + entity_access_type: str = None, + ): + self.accessibility = accessibility + self.entity_access_type = entity_access_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.entity_access_type is not None: + result['EntityAccessType'] = self.entity_access_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('EntityAccessType') is not None: + self.entity_access_type = 
m.get('EntityAccessType') + return self + + +class ListPermissionsResponseBodyPermissions(TeaModel): def __init__( self, - names: str = None, + permission_code: str = None, + permission_rules: List[ListPermissionsResponseBodyPermissionsPermissionRules] = None, ): - self.names = names + self.permission_code = permission_code + self.permission_rules = permission_rules def validate(self): - pass + if self.permission_rules: + for k in self.permission_rules: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -7323,30 +14680,42 @@ def to_map(self): return _map result = dict() - if self.names is not None: - result['Names'] = self.names + if self.permission_code is not None: + result['PermissionCode'] = self.permission_code + result['PermissionRules'] = [] + if self.permission_rules is not None: + for k in self.permission_rules: + result['PermissionRules'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Names') is not None: - self.names = m.get('Names') + if m.get('PermissionCode') is not None: + self.permission_code = m.get('PermissionCode') + self.permission_rules = [] + if m.get('PermissionRules') is not None: + for k in m.get('PermissionRules'): + temp_model = ListPermissionsResponseBodyPermissionsPermissionRules() + self.permission_rules.append(temp_model.from_map(k)) return self -class ListFeaturesResponseBody(TeaModel): +class ListPermissionsResponseBody(TeaModel): def __init__( self, - features: List[str] = None, + permissions: List[ListPermissionsResponseBodyPermissions] = None, request_id: str = None, total_count: int = None, ): - self.features = features + self.permissions = permissions self.request_id = request_id self.total_count = total_count def validate(self): - pass + if self.permissions: + for k in self.permissions: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -7354,8 +14723,10 @@ def to_map(self): return _map result = dict() - if self.features is not 
None: - result['Features'] = self.features + result['Permissions'] = [] + if self.permissions is not None: + for k in self.permissions: + result['Permissions'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id if self.total_count is not None: @@ -7364,8 +14735,11 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - if m.get('Features') is not None: - self.features = m.get('Features') + self.permissions = [] + if m.get('Permissions') is not None: + for k in m.get('Permissions'): + temp_model = ListPermissionsResponseBodyPermissions() + self.permissions.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') if m.get('TotalCount') is not None: @@ -7373,21 +14747,18 @@ def from_map(self, m: dict = None): return self -class ListFeaturesResponse(TeaModel): +class ListPermissionsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListFeaturesResponseBody = None, + body: ListPermissionsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7412,19 +14783,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListFeaturesResponseBody() + temp_model = ListPermissionsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListGlobalPermissionsResponseBodyPermissionsPermissionRules(TeaModel): +class ListProductAuthorizationsRequest(TeaModel): def __init__( self, - accessibility: str = None, - entity_access_type: str = None, + ram_role_names: str = None, ): - self.accessibility = accessibility - self.entity_access_type = 
entity_access_type + self.ram_role_names = ram_role_names def validate(self): pass @@ -7435,35 +14804,34 @@ def to_map(self): return _map result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.entity_access_type is not None: - result['EntityAccessType'] = self.entity_access_type + if self.ram_role_names is not None: + result['RamRoleNames'] = self.ram_role_names return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('EntityAccessType') is not None: - self.entity_access_type = m.get('EntityAccessType') + if m.get('RamRoleNames') is not None: + self.ram_role_names = m.get('RamRoleNames') return self -class ListGlobalPermissionsResponseBodyPermissions(TeaModel): +class ListProductAuthorizationsResponseBodyAuthorizationDetails(TeaModel): def __init__( self, - permission_code: str = None, - permission_rules: List[ListGlobalPermissionsResponseBodyPermissionsPermissionRules] = None, + authorization_url: str = None, + is_authorized: bool = None, + ram_role_arn: str = None, + ram_role_name: str = None, + ram_role_type: str = None, ): - self.permission_code = permission_code - self.permission_rules = permission_rules + self.authorization_url = authorization_url + self.is_authorized = is_authorized + self.ram_role_arn = ram_role_arn + self.ram_role_name = ram_role_name + self.ram_role_type = ram_role_type def validate(self): - if self.permission_rules: - for k in self.permission_rules: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -7471,38 +14839,47 @@ def to_map(self): return _map result = dict() - if self.permission_code is not None: - result['PermissionCode'] = self.permission_code - result['PermissionRules'] = [] - if self.permission_rules is not None: - for k in self.permission_rules: - result['PermissionRules'].append(k.to_map() if k else None) + if self.authorization_url is not 
None: + result['AuthorizationUrl'] = self.authorization_url + if self.is_authorized is not None: + result['IsAuthorized'] = self.is_authorized + if self.ram_role_arn is not None: + result['RamRoleARN'] = self.ram_role_arn + if self.ram_role_name is not None: + result['RamRoleName'] = self.ram_role_name + if self.ram_role_type is not None: + result['RamRoleType'] = self.ram_role_type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('PermissionCode') is not None: - self.permission_code = m.get('PermissionCode') - self.permission_rules = [] - if m.get('PermissionRules') is not None: - for k in m.get('PermissionRules'): - temp_model = ListGlobalPermissionsResponseBodyPermissionsPermissionRules() - self.permission_rules.append(temp_model.from_map(k)) + if m.get('AuthorizationUrl') is not None: + self.authorization_url = m.get('AuthorizationUrl') + if m.get('IsAuthorized') is not None: + self.is_authorized = m.get('IsAuthorized') + if m.get('RamRoleARN') is not None: + self.ram_role_arn = m.get('RamRoleARN') + if m.get('RamRoleName') is not None: + self.ram_role_name = m.get('RamRoleName') + if m.get('RamRoleType') is not None: + self.ram_role_type = m.get('RamRoleType') return self -class ListGlobalPermissionsResponseBody(TeaModel): +class ListProductAuthorizationsResponseBody(TeaModel): def __init__( self, - permissions: List[ListGlobalPermissionsResponseBodyPermissions] = None, + authorization_details: List[ListProductAuthorizationsResponseBodyAuthorizationDetails] = None, + authorization_url: str = None, request_id: str = None, ): - self.permissions = permissions + self.authorization_details = authorization_details + self.authorization_url = authorization_url self.request_id = request_id def validate(self): - if self.permissions: - for k in self.permissions: + if self.authorization_details: + for k in self.authorization_details: if k: k.validate() @@ -7512,41 +14889,42 @@ def to_map(self): return _map result = dict() - 
result['Permissions'] = [] - if self.permissions is not None: - for k in self.permissions: - result['Permissions'].append(k.to_map() if k else None) + result['AuthorizationDetails'] = [] + if self.authorization_details is not None: + for k in self.authorization_details: + result['AuthorizationDetails'].append(k.to_map() if k else None) + if self.authorization_url is not None: + result['AuthorizationUrl'] = self.authorization_url if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - self.permissions = [] - if m.get('Permissions') is not None: - for k in m.get('Permissions'): - temp_model = ListGlobalPermissionsResponseBodyPermissions() - self.permissions.append(temp_model.from_map(k)) + self.authorization_details = [] + if m.get('AuthorizationDetails') is not None: + for k in m.get('AuthorizationDetails'): + temp_model = ListProductAuthorizationsResponseBodyAuthorizationDetails() + self.authorization_details.append(temp_model.from_map(k)) + if m.get('AuthorizationUrl') is not None: + self.authorization_url = m.get('AuthorizationUrl') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class ListGlobalPermissionsResponse(TeaModel): +class ListProductAuthorizationsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListGlobalPermissionsResponseBody = None, + body: ListProductAuthorizationsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7571,25 +14949,21 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListGlobalPermissionsResponseBody() + 
temp_model = ListProductAuthorizationsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListImageLabelsRequest(TeaModel): +class ListProductsRequest(TeaModel): def __init__( self, - image_id: str = None, - label_filter: str = None, - label_keys: str = None, - region: str = None, - workspace_id: str = None, + product_codes: str = None, + service_codes: str = None, + verbose: bool = None, ): - self.image_id = image_id - self.label_filter = label_filter - self.label_keys = label_keys - self.region = region - self.workspace_id = workspace_id + self.product_codes = product_codes + self.service_codes = service_codes + self.verbose = verbose def validate(self): pass @@ -7600,41 +14974,86 @@ def to_map(self): return _map result = dict() - if self.image_id is not None: - result['ImageId'] = self.image_id - if self.label_filter is not None: - result['LabelFilter'] = self.label_filter - if self.label_keys is not None: - result['LabelKeys'] = self.label_keys - if self.region is not None: - result['Region'] = self.region - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.product_codes is not None: + result['ProductCodes'] = self.product_codes + if self.service_codes is not None: + result['ServiceCodes'] = self.service_codes + if self.verbose is not None: + result['Verbose'] = self.verbose + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ProductCodes') is not None: + self.product_codes = m.get('ProductCodes') + if m.get('ServiceCodes') is not None: + self.service_codes = m.get('ServiceCodes') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') + return self + + +class ListProductsResponseBodyProducts(TeaModel): + def __init__( + self, + has_permission_to_purchase: bool = None, + is_purchased: bool = None, + product_code: str = None, + product_instance_id: str = None, + purchase_url: str = None, + ): + self.has_permission_to_purchase = has_permission_to_purchase 
+ self.is_purchased = is_purchased + self.product_code = product_code + self.product_instance_id = product_instance_id + self.purchase_url = purchase_url + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.has_permission_to_purchase is not None: + result['HasPermissionToPurchase'] = self.has_permission_to_purchase + if self.is_purchased is not None: + result['IsPurchased'] = self.is_purchased + if self.product_code is not None: + result['ProductCode'] = self.product_code + if self.product_instance_id is not None: + result['ProductInstanceId'] = self.product_instance_id + if self.purchase_url is not None: + result['PurchaseUrl'] = self.purchase_url return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ImageId') is not None: - self.image_id = m.get('ImageId') - if m.get('LabelFilter') is not None: - self.label_filter = m.get('LabelFilter') - if m.get('LabelKeys') is not None: - self.label_keys = m.get('LabelKeys') - if m.get('Region') is not None: - self.region = m.get('Region') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('HasPermissionToPurchase') is not None: + self.has_permission_to_purchase = m.get('HasPermissionToPurchase') + if m.get('IsPurchased') is not None: + self.is_purchased = m.get('IsPurchased') + if m.get('ProductCode') is not None: + self.product_code = m.get('ProductCode') + if m.get('ProductInstanceId') is not None: + self.product_instance_id = m.get('ProductInstanceId') + if m.get('PurchaseUrl') is not None: + self.purchase_url = m.get('PurchaseUrl') return self -class ListImageLabelsResponseBodyLabels(TeaModel): +class ListProductsResponseBodyServices(TeaModel): def __init__( self, - key: str = None, - value: str = None, + is_open: bool = None, + open_url: str = None, + service_code: str = None, ): - self.key = key - self.value = value + self.is_open = is_open + self.open_url = 
open_url + self.service_code = service_code def validate(self): pass @@ -7645,35 +15064,43 @@ def to_map(self): return _map result = dict() - if self.key is not None: - result['Key'] = self.key - if self.value is not None: - result['Value'] = self.value + if self.is_open is not None: + result['IsOpen'] = self.is_open + if self.open_url is not None: + result['OpenUrl'] = self.open_url + if self.service_code is not None: + result['ServiceCode'] = self.service_code return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Key') is not None: - self.key = m.get('Key') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('IsOpen') is not None: + self.is_open = m.get('IsOpen') + if m.get('OpenUrl') is not None: + self.open_url = m.get('OpenUrl') + if m.get('ServiceCode') is not None: + self.service_code = m.get('ServiceCode') return self -class ListImageLabelsResponseBody(TeaModel): +class ListProductsResponseBody(TeaModel): def __init__( self, - labels: List[ListImageLabelsResponseBodyLabels] = None, + products: List[ListProductsResponseBodyProducts] = None, request_id: str = None, - total_count: int = None, + services: List[ListProductsResponseBodyServices] = None, ): - self.labels = labels + self.products = products self.request_id = request_id - self.total_count = total_count + self.services = services def validate(self): - if self.labels: - for k in self.labels: + if self.products: + for k in self.products: + if k: + k.validate() + if self.services: + for k in self.services: if k: k.validate() @@ -7683,45 +15110,47 @@ def to_map(self): return _map result = dict() - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) + result['Products'] = [] + if self.products is not None: + for k in self.products: + result['Products'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - if self.total_count is not 
None: - result['TotalCount'] = self.total_count + result['Services'] = [] + if self.services is not None: + for k in self.services: + result['Services'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = ListImageLabelsResponseBodyLabels() - self.labels.append(temp_model.from_map(k)) + self.products = [] + if m.get('Products') is not None: + for k in m.get('Products'): + temp_model = ListProductsResponseBodyProducts() + self.products.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + self.services = [] + if m.get('Services') is not None: + for k in m.get('Services'): + temp_model = ListProductsResponseBodyServices() + self.services.append(temp_model.from_map(k)) return self -class ListImageLabelsResponse(TeaModel): +class ListProductsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListImageLabelsResponseBody = None, + body: ListProductsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7746,37 +15175,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListImageLabelsResponseBody() + temp_model = ListProductsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListImagesRequest(TeaModel): +class ListQuotasRequest(TeaModel): def __init__( self, - labels: str = None, name: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, - 
parent_user_id: str = None, - query: str = None, - sort_by: str = None, - user_id: str = None, - verbose: bool = None, - workspace_id: str = None, ): - self.labels = labels self.name = name - self.order = order - self.page_number = page_number - self.page_size = page_size - self.parent_user_id = parent_user_id - self.query = query - self.sort_by = sort_by - self.user_id = user_id - self.verbose = verbose - self.workspace_id = workspace_id def validate(self): pass @@ -7787,64 +15196,26 @@ def to_map(self): return _map result = dict() - if self.labels is not None: - result['Labels'] = self.labels if self.name is not None: result['Name'] = self.name - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.parent_user_id is not None: - result['ParentUserId'] = self.parent_user_id - if self.query is not None: - result['Query'] = self.query - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.user_id is not None: - result['UserId'] = self.user_id - if self.verbose is not None: - result['Verbose'] = self.verbose - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Labels') is not None: - self.labels = m.get('Labels') if m.get('Name') is not None: self.name = m.get('Name') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('ParentUserId') is not None: - self.parent_user_id = m.get('ParentUserId') - if m.get('Query') is not None: - self.query = m.get('Query') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('Verbose') is not 
None: - self.verbose = m.get('Verbose') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') return self -class ListImagesResponseBodyImagesLabels(TeaModel): +class ListQuotasResponseBodyQuotasSpecs(TeaModel): def __init__( self, - key: str = None, + name: str = None, + type: str = None, value: str = None, ): - self.key = key + self.name = name + self.type = type self.value = value def validate(self): @@ -7856,130 +15227,110 @@ def to_map(self): return _map result = dict() - if self.key is not None: - result['Key'] = self.key + if self.name is not None: + result['Name'] = self.name + if self.type is not None: + result['Type'] = self.type if self.value is not None: result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Key') is not None: - self.key = m.get('Key') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Type') is not None: + self.type = m.get('Type') if m.get('Value') is not None: self.value = m.get('Value') return self -class ListImagesResponseBodyImages(TeaModel): +class ListQuotasResponseBodyQuotas(TeaModel): def __init__( self, - accessibility: str = None, - description: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - image_id: str = None, - image_uri: str = None, - labels: List[ListImagesResponseBodyImagesLabels] = None, + display_name: str = None, + id: str = None, + mode: str = None, name: str = None, - parent_user_id: str = None, - user_id: str = None, - workspace_id: str = None, + product_code: str = None, + quota_type: str = None, + specs: List[ListQuotasResponseBodyQuotasSpecs] = None, ): - self.accessibility = accessibility - self.description = description - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.image_id = image_id - self.image_uri = image_uri - self.labels = labels + self.display_name = display_name + self.id = id + self.mode = mode self.name = name - 
self.parent_user_id = parent_user_id - self.user_id = user_id - self.workspace_id = workspace_id + self.product_code = product_code + self.quota_type = quota_type + self.specs = specs def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.accessibility is not None: - result['Accessibility'] = self.accessibility - if self.description is not None: - result['Description'] = self.description - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.image_id is not None: - result['ImageId'] = self.image_id - if self.image_uri is not None: - result['ImageUri'] = self.image_uri - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) + if self.specs: + for k in self.specs: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.id is not None: + result['Id'] = self.id + if self.mode is not None: + result['Mode'] = self.mode if self.name is not None: result['Name'] = self.name - if self.parent_user_id is not None: - result['ParentUserId'] = self.parent_user_id - if self.user_id is not None: - result['UserId'] = self.user_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.product_code is not None: + result['ProductCode'] = self.product_code + if self.quota_type is not None: + result['QuotaType'] = self.quota_type + result['Specs'] = [] + if self.specs is not None: + for k in self.specs: + result['Specs'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if 
m.get('Accessibility') is not None: - self.accessibility = m.get('Accessibility') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('ImageId') is not None: - self.image_id = m.get('ImageId') - if m.get('ImageUri') is not None: - self.image_uri = m.get('ImageUri') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = ListImagesResponseBodyImagesLabels() - self.labels.append(temp_model.from_map(k)) + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('Id') is not None: + self.id = m.get('Id') + if m.get('Mode') is not None: + self.mode = m.get('Mode') if m.get('Name') is not None: self.name = m.get('Name') - if m.get('ParentUserId') is not None: - self.parent_user_id = m.get('ParentUserId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('ProductCode') is not None: + self.product_code = m.get('ProductCode') + if m.get('QuotaType') is not None: + self.quota_type = m.get('QuotaType') + self.specs = [] + if m.get('Specs') is not None: + for k in m.get('Specs'): + temp_model = ListQuotasResponseBodyQuotasSpecs() + self.specs.append(temp_model.from_map(k)) return self -class ListImagesResponseBody(TeaModel): +class ListQuotasResponseBody(TeaModel): def __init__( self, - images: List[ListImagesResponseBodyImages] = None, + quotas: List[ListQuotasResponseBodyQuotas] = None, request_id: str = None, total_count: int = None, ): - self.images = images + self.quotas = quotas self.request_id = request_id self.total_count = total_count def validate(self): - if self.images: - for k in self.images: + if self.quotas: + for k in self.quotas: if k: k.validate() @@ -7989,10 
+15340,10 @@ def to_map(self): return _map result = dict() - result['Images'] = [] - if self.images is not None: - for k in self.images: - result['Images'].append(k.to_map() if k else None) + result['Quotas'] = [] + if self.quotas is not None: + for k in self.quotas: + result['Quotas'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id if self.total_count is not None: @@ -8001,11 +15352,11 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - self.images = [] - if m.get('Images') is not None: - for k in m.get('Images'): - temp_model = ListImagesResponseBodyImages() - self.images.append(temp_model.from_map(k)) + self.quotas = [] + if m.get('Quotas') is not None: + for k in m.get('Quotas'): + temp_model = ListQuotasResponseBodyQuotas() + self.quotas.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') if m.get('TotalCount') is not None: @@ -8013,21 +15364,18 @@ def from_map(self, m: dict = None): return self -class ListImagesResponse(TeaModel): +class ListQuotasResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListImagesResponseBody = None, + body: ListQuotasResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -8052,23 +15400,39 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListImagesResponseBody() + temp_model = ListQuotasResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListMembersRequest(TeaModel): +class ListResourcesRequest(TeaModel): def __init__( self, - member_name: str = None, + group_name: str = 
None, + labels: str = None, + option: str = None, page_number: int = None, page_size: int = None, - roles: str = None, + product_types: str = None, + quota_ids: str = None, + resource_name: str = None, + resource_types: str = None, + verbose: bool = None, + verbose_fields: str = None, + workspace_id: str = None, ): - self.member_name = member_name + self.group_name = group_name + self.labels = labels + self.option = option self.page_number = page_number self.page_size = page_size - self.roles = roles + self.product_types = product_types + self.quota_ids = quota_ids + self.resource_name = resource_name + self.resource_types = resource_types + self.verbose = verbose + self.verbose_fields = verbose_fields + self.workspace_id = workspace_id def validate(self): pass @@ -8079,45 +15443,71 @@ def to_map(self): return _map result = dict() - if self.member_name is not None: - result['MemberName'] = self.member_name + if self.group_name is not None: + result['GroupName'] = self.group_name + if self.labels is not None: + result['Labels'] = self.labels + if self.option is not None: + result['Option'] = self.option if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size - if self.roles is not None: - result['Roles'] = self.roles + if self.product_types is not None: + result['ProductTypes'] = self.product_types + if self.quota_ids is not None: + result['QuotaIds'] = self.quota_ids + if self.resource_name is not None: + result['ResourceName'] = self.resource_name + if self.resource_types is not None: + result['ResourceTypes'] = self.resource_types + if self.verbose is not None: + result['Verbose'] = self.verbose + if self.verbose_fields is not None: + result['VerboseFields'] = self.verbose_fields + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('MemberName') is not None: - self.member_name = 
m.get('MemberName') + if m.get('GroupName') is not None: + self.group_name = m.get('GroupName') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('Option') is not None: + self.option = m.get('Option') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: self.page_size = m.get('PageSize') - if m.get('Roles') is not None: - self.roles = m.get('Roles') + if m.get('ProductTypes') is not None: + self.product_types = m.get('ProductTypes') + if m.get('QuotaIds') is not None: + self.quota_ids = m.get('QuotaIds') + if m.get('ResourceName') is not None: + self.resource_name = m.get('ResourceName') + if m.get('ResourceTypes') is not None: + self.resource_types = m.get('ResourceTypes') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') + if m.get('VerboseFields') is not None: + self.verbose_fields = m.get('VerboseFields') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class ListMembersResponseBodyMembers(TeaModel): +class ListResourcesResponseBodyResourcesEncryption(TeaModel): def __init__( self, - display_name: str = None, - gmt_create_time: str = None, - member_id: str = None, - member_name: str = None, - roles: List[str] = None, - user_id: str = None, + algorithm: str = None, + enabled: bool = None, + key: str = None, ): - self.display_name = display_name - self.gmt_create_time = gmt_create_time - self.member_id = member_id - self.member_name = member_name - self.roles = roles - self.user_id = user_id + self.algorithm = algorithm + self.enabled = enabled + self.key = key def validate(self): pass @@ -8128,101 +15518,63 @@ def to_map(self): return _map result = dict() - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.member_id is not None: - result['MemberId'] = self.member_id - if self.member_name is 
not None: - result['MemberName'] = self.member_name - if self.roles is not None: - result['Roles'] = self.roles - if self.user_id is not None: - result['UserId'] = self.user_id + if self.algorithm is not None: + result['Algorithm'] = self.algorithm + if self.enabled is not None: + result['Enabled'] = self.enabled + if self.key is not None: + result['Key'] = self.key return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('MemberId') is not None: - self.member_id = m.get('MemberId') - if m.get('MemberName') is not None: - self.member_name = m.get('MemberName') - if m.get('Roles') is not None: - self.roles = m.get('Roles') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') + if m.get('Algorithm') is not None: + self.algorithm = m.get('Algorithm') + if m.get('Enabled') is not None: + self.enabled = m.get('Enabled') + if m.get('Key') is not None: + self.key = m.get('Key') return self -class ListMembersResponseBody(TeaModel): +class ListResourcesResponseBodyResourcesExecutor(TeaModel): def __init__( self, - members: List[ListMembersResponseBodyMembers] = None, - request_id: str = None, - total_count: int = None, + owner_id: str = None, ): - self.members = members - self.request_id = request_id - self.total_count = total_count + self.owner_id = owner_id def validate(self): - if self.members: - for k in self.members: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() if _map is not None: return _map - result = dict() - result['Members'] = [] - if self.members is not None: - for k in self.members: - result['Members'].append(k.to_map() if k else None) - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count - return result - - def from_map(self, m: dict = 
None): - m = m or dict() - self.members = [] - if m.get('Members') is not None: - for k in m.get('Members'): - temp_model = ListMembersResponseBodyMembers() - self.members.append(temp_model.from_map(k)) - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + result = dict() + if self.owner_id is not None: + result['OwnerId'] = self.owner_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') return self -class ListMembersResponse(TeaModel): +class ListResourcesResponseBodyResourcesLabels(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: ListMembersResponseBody = None, + key: str = None, + value: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.key = key + self.value = value def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -8230,32 +15582,29 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = ListMembersResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('Key') is not None: + 
self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class ListModelDomainsRequest(TeaModel): +class ListResourcesResponseBodyResourcesQuotasSpecs(TeaModel): def __init__( self, - model_domain_ids: str = None, + name: str = None, + value: str = None, ): - self.model_domain_ids = model_domain_ids + self.name = name + self.value = value def validate(self): pass @@ -8266,34 +15615,47 @@ def to_map(self): return _map result = dict() - if self.model_domain_ids is not None: - result['ModelDomainIds'] = self.model_domain_ids + if self.name is not None: + result['Name'] = self.name + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ModelDomainIds') is not None: - self.model_domain_ids = m.get('ModelDomainIds') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class ListModelDomainsResponseBodyModelDomainsModelTasks(TeaModel): +class ListResourcesResponseBodyResourcesQuotas(TeaModel): def __init__( self, - model_domain_id: str = None, - model_task_id: str = None, - model_task_name: str = None, - order_number: int = None, - search_words: str = None, + card_type: str = None, + display_name: str = None, + id: str = None, + mode: str = None, + name: str = None, + product_code: str = None, + quota_type: str = None, + specs: List[ListResourcesResponseBodyResourcesQuotasSpecs] = None, ): - self.model_domain_id = model_domain_id - self.model_task_id = model_task_id - self.model_task_name = model_task_name - self.order_number = order_number - self.search_words = search_words + self.card_type = card_type + self.display_name = display_name + self.id = id + self.mode = mode + self.name = name + self.product_code = product_code + self.quota_type = quota_type + self.specs = specs def validate(self): - pass + if self.specs: + for k in self.specs: + if k: + k.validate() def 
to_map(self): _map = super().to_map() @@ -8301,49 +15663,94 @@ def to_map(self): return _map result = dict() - if self.model_domain_id is not None: - result['ModelDomainId'] = self.model_domain_id - if self.model_task_id is not None: - result['ModelTaskId'] = self.model_task_id - if self.model_task_name is not None: - result['ModelTaskName'] = self.model_task_name - if self.order_number is not None: - result['OrderNumber'] = self.order_number - if self.search_words is not None: - result['SearchWords'] = self.search_words + if self.card_type is not None: + result['CardType'] = self.card_type + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.id is not None: + result['Id'] = self.id + if self.mode is not None: + result['Mode'] = self.mode + if self.name is not None: + result['Name'] = self.name + if self.product_code is not None: + result['ProductCode'] = self.product_code + if self.quota_type is not None: + result['QuotaType'] = self.quota_type + result['Specs'] = [] + if self.specs is not None: + for k in self.specs: + result['Specs'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ModelDomainId') is not None: - self.model_domain_id = m.get('ModelDomainId') - if m.get('ModelTaskId') is not None: - self.model_task_id = m.get('ModelTaskId') - if m.get('ModelTaskName') is not None: - self.model_task_name = m.get('ModelTaskName') - if m.get('OrderNumber') is not None: - self.order_number = m.get('OrderNumber') - if m.get('SearchWords') is not None: - self.search_words = m.get('SearchWords') + if m.get('CardType') is not None: + self.card_type = m.get('CardType') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('Id') is not None: + self.id = m.get('Id') + if m.get('Mode') is not None: + self.mode = m.get('Mode') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('ProductCode') is not None: + 
self.product_code = m.get('ProductCode') + if m.get('QuotaType') is not None: + self.quota_type = m.get('QuotaType') + self.specs = [] + if m.get('Specs') is not None: + for k in m.get('Specs'): + temp_model = ListResourcesResponseBodyResourcesQuotasSpecs() + self.specs.append(temp_model.from_map(k)) return self -class ListModelDomainsResponseBodyModelDomains(TeaModel): +class ListResourcesResponseBodyResources(TeaModel): def __init__( self, - model_domain_id: str = None, - model_domain_name: str = None, - model_tasks: List[ListModelDomainsResponseBodyModelDomainsModelTasks] = None, - order_number: int = None, + encryption: ListResourcesResponseBodyResourcesEncryption = None, + env_type: str = None, + executor: ListResourcesResponseBodyResourcesExecutor = None, + gmt_create_time: str = None, + group_name: str = None, + id: str = None, + is_default: bool = None, + labels: List[ListResourcesResponseBodyResourcesLabels] = None, + name: str = None, + product_type: str = None, + quotas: List[ListResourcesResponseBodyResourcesQuotas] = None, + resource_type: str = None, + spec: Dict[str, Any] = None, + workspace_id: str = None, ): - self.model_domain_id = model_domain_id - self.model_domain_name = model_domain_name - self.model_tasks = model_tasks - self.order_number = order_number + self.encryption = encryption + self.env_type = env_type + self.executor = executor + self.gmt_create_time = gmt_create_time + self.group_name = group_name + self.id = id + self.is_default = is_default + self.labels = labels + self.name = name + self.product_type = product_type + self.quotas = quotas + self.resource_type = resource_type + self.spec = spec + self.workspace_id = workspace_id def validate(self): - if self.model_tasks: - for k in self.model_tasks: + if self.encryption: + self.encryption.validate() + if self.executor: + self.executor.validate() + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.quotas: + for k in self.quotas: if k: k.validate() @@ -8353,48 
+15760,95 @@ def to_map(self): return _map result = dict() - if self.model_domain_id is not None: - result['ModelDomainId'] = self.model_domain_id - if self.model_domain_name is not None: - result['ModelDomainName'] = self.model_domain_name - result['ModelTasks'] = [] - if self.model_tasks is not None: - for k in self.model_tasks: - result['ModelTasks'].append(k.to_map() if k else None) - if self.order_number is not None: - result['OrderNumber'] = self.order_number + if self.encryption is not None: + result['Encryption'] = self.encryption.to_map() + if self.env_type is not None: + result['EnvType'] = self.env_type + if self.executor is not None: + result['Executor'] = self.executor.to_map() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.group_name is not None: + result['GroupName'] = self.group_name + if self.id is not None: + result['Id'] = self.id + if self.is_default is not None: + result['IsDefault'] = self.is_default + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.product_type is not None: + result['ProductType'] = self.product_type + result['Quotas'] = [] + if self.quotas is not None: + for k in self.quotas: + result['Quotas'].append(k.to_map() if k else None) + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + if self.spec is not None: + result['Spec'] = self.spec + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('ModelDomainId') is not None: - self.model_domain_id = m.get('ModelDomainId') - if m.get('ModelDomainName') is not None: - self.model_domain_name = m.get('ModelDomainName') - self.model_tasks = [] - if m.get('ModelTasks') is not None: - for k in m.get('ModelTasks'): - temp_model = 
ListModelDomainsResponseBodyModelDomainsModelTasks() - self.model_tasks.append(temp_model.from_map(k)) - if m.get('OrderNumber') is not None: - self.order_number = m.get('OrderNumber') + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Encryption') is not None: + temp_model = ListResourcesResponseBodyResourcesEncryption() + self.encryption = temp_model.from_map(m['Encryption']) + if m.get('EnvType') is not None: + self.env_type = m.get('EnvType') + if m.get('Executor') is not None: + temp_model = ListResourcesResponseBodyResourcesExecutor() + self.executor = temp_model.from_map(m['Executor']) + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GroupName') is not None: + self.group_name = m.get('GroupName') + if m.get('Id') is not None: + self.id = m.get('Id') + if m.get('IsDefault') is not None: + self.is_default = m.get('IsDefault') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = ListResourcesResponseBodyResourcesLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('ProductType') is not None: + self.product_type = m.get('ProductType') + self.quotas = [] + if m.get('Quotas') is not None: + for k in m.get('Quotas'): + temp_model = ListResourcesResponseBodyResourcesQuotas() + self.quotas.append(temp_model.from_map(k)) + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + if m.get('Spec') is not None: + self.spec = m.get('Spec') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class ListModelDomainsResponseBody(TeaModel): +class ListResourcesResponseBody(TeaModel): def __init__( self, - model_domains: List[ListModelDomainsResponseBodyModelDomains] = None, request_id: str = None, + resources: List[ListResourcesResponseBodyResources] = None, total_count: int = None, ): - self.model_domains = model_domains 
self.request_id = request_id + self.resources = resources self.total_count = total_count def validate(self): - if self.model_domains: - for k in self.model_domains: + if self.resources: + for k in self.resources: if k: k.validate() @@ -8404,45 +15858,42 @@ def to_map(self): return _map result = dict() - result['ModelDomains'] = [] - if self.model_domains is not None: - for k in self.model_domains: - result['ModelDomains'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + result['Resources'] = [] + if self.resources is not None: + for k in self.resources: + result['Resources'].append(k.to_map() if k else None) if self.total_count is not None: result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - self.model_domains = [] - if m.get('ModelDomains') is not None: - for k in m.get('ModelDomains'): - temp_model = ListModelDomainsResponseBodyModelDomains() - self.model_domains.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + self.resources = [] + if m.get('Resources') is not None: + for k in m.get('Resources'): + temp_model = ListResourcesResponseBodyResources() + self.resources.append(temp_model.from_map(k)) if m.get('TotalCount') is not None: self.total_count = m.get('TotalCount') return self -class ListModelDomainsResponse(TeaModel): +class ListResourcesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListModelDomainsResponseBody = None, + body: ListResourcesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -8467,41 +15918,31 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: 
self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListModelDomainsResponseBody() + temp_model = ListResourcesResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListModelVersionsRequest(TeaModel): +class ListServiceTemplatesRequest(TeaModel): def __init__( self, - approval_status: str = None, - format_type: str = None, - framework_type: str = None, label: str = None, - label_string: str = None, - labels: str = None, order: str = None, page_number: int = None, page_size: int = None, + provider: str = None, + query: str = None, + service_template_name: str = None, sort_by: str = None, - source_id: str = None, - source_type: str = None, - version_name: str = None, ): - self.approval_status = approval_status - self.format_type = format_type - self.framework_type = framework_type self.label = label - self.label_string = label_string - self.labels = labels self.order = order self.page_number = page_number self.page_size = page_size + self.provider = provider + self.query = query + self.service_template_name = service_template_name self.sort_by = sort_by - self.source_id = source_id - self.source_type = source_type - self.version_name = version_name def validate(self): pass @@ -8512,79 +15953,59 @@ def to_map(self): return _map result = dict() - if self.approval_status is not None: - result['ApprovalStatus'] = self.approval_status - if self.format_type is not None: - result['FormatType'] = self.format_type - if self.framework_type is not None: - result['FrameworkType'] = self.framework_type if self.label is not None: result['Label'] = self.label - if self.label_string is not None: - result['LabelString'] = self.label_string - if self.labels is not None: - result['Labels'] = self.labels if self.order is not None: result['Order'] = self.order if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size + if self.provider is not None: + 
result['Provider'] = self.provider + if self.query is not None: + result['Query'] = self.query + if self.service_template_name is not None: + result['ServiceTemplateName'] = self.service_template_name if self.sort_by is not None: result['SortBy'] = self.sort_by - if self.source_id is not None: - result['SourceId'] = self.source_id - if self.source_type is not None: - result['SourceType'] = self.source_type - if self.version_name is not None: - result['VersionName'] = self.version_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ApprovalStatus') is not None: - self.approval_status = m.get('ApprovalStatus') - if m.get('FormatType') is not None: - self.format_type = m.get('FormatType') - if m.get('FrameworkType') is not None: - self.framework_type = m.get('FrameworkType') if m.get('Label') is not None: self.label = m.get('Label') - if m.get('LabelString') is not None: - self.label_string = m.get('LabelString') - if m.get('Labels') is not None: - self.labels = m.get('Labels') if m.get('Order') is not None: self.order = m.get('Order') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: self.page_size = m.get('PageSize') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('Query') is not None: + self.query = m.get('Query') + if m.get('ServiceTemplateName') is not None: + self.service_template_name = m.get('ServiceTemplateName') if m.get('SortBy') is not None: self.sort_by = m.get('SortBy') - if m.get('SourceId') is not None: - self.source_id = m.get('SourceId') - if m.get('SourceType') is not None: - self.source_type = m.get('SourceType') - if m.get('VersionName') is not None: - self.version_name = m.get('VersionName') return self -class ListModelVersionsResponseBody(TeaModel): +class ListServiceTemplatesResponseBody(TeaModel): def __init__( self, request_id: str = None, + service_templates: List[ServiceTemplate] = None, total_count: int = None, - 
versions: List[ModelVersion] = None, ): self.request_id = request_id + self.service_templates = service_templates self.total_count = total_count - self.versions = versions def validate(self): - if self.versions: - for k in self.versions: + if self.service_templates: + for k in self.service_templates: if k: k.validate() @@ -8596,43 +16017,40 @@ def to_map(self): result = dict() if self.request_id is not None: result['RequestId'] = self.request_id + result['ServiceTemplates'] = [] + if self.service_templates is not None: + for k in self.service_templates: + result['ServiceTemplates'].append(k.to_map() if k else None) if self.total_count is not None: result['TotalCount'] = self.total_count - result['Versions'] = [] - if self.versions is not None: - for k in self.versions: - result['Versions'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + self.service_templates = [] + if m.get('ServiceTemplates') is not None: + for k in m.get('ServiceTemplates'): + temp_model = ServiceTemplate() + self.service_templates.append(temp_model.from_map(k)) if m.get('TotalCount') is not None: self.total_count = m.get('TotalCount') - self.versions = [] - if m.get('Versions') is not None: - for k in m.get('Versions'): - temp_model = ModelVersion() - self.versions.append(temp_model.from_map(k)) return self -class ListModelVersionsResponse(TeaModel): +class ListServiceTemplatesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListModelVersionsResponseBody = None, + body: ListServiceTemplatesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -8657,43 +16075,19 @@ def 
from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListModelVersionsResponseBody() + temp_model = ListServiceTemplatesResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListModelsRequest(TeaModel): +class ListUserConfigsRequest(TeaModel): def __init__( self, - domain: str = None, - label: str = None, - label_string: str = None, - labels: str = None, - model_name: str = None, - order: str = None, - origin: str = None, - page_number: int = None, - page_size: int = None, - provider: str = None, - query: str = None, - sort_by: str = None, - task: str = None, - workspace_id: str = None, + category_names: str = None, + config_keys: str = None, ): - self.domain = domain - self.label = label - self.label_string = label_string - self.labels = labels - self.model_name = model_name - self.order = order - self.origin = origin - self.page_number = page_number - self.page_size = page_size - self.provider = provider - self.query = query - self.sort_by = sort_by - self.task = task - self.workspace_id = workspace_id + self.category_names = category_names + self.config_keys = config_keys def validate(self): pass @@ -8704,83 +16098,74 @@ def to_map(self): return _map result = dict() - if self.domain is not None: - result['Domain'] = self.domain - if self.label is not None: - result['Label'] = self.label - if self.label_string is not None: - result['LabelString'] = self.label_string - if self.labels is not None: - result['Labels'] = self.labels - if self.model_name is not None: - result['ModelName'] = self.model_name - if self.order is not None: - result['Order'] = self.order - if self.origin is not None: - result['Origin'] = self.origin - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.provider is not None: - result['Provider'] = self.provider - if 
self.query is not None: - result['Query'] = self.query - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.task is not None: - result['Task'] = self.task - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.category_names is not None: + result['CategoryNames'] = self.category_names + if self.config_keys is not None: + result['ConfigKeys'] = self.config_keys return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Domain') is not None: - self.domain = m.get('Domain') - if m.get('Label') is not None: - self.label = m.get('Label') - if m.get('LabelString') is not None: - self.label_string = m.get('LabelString') - if m.get('Labels') is not None: - self.labels = m.get('Labels') - if m.get('ModelName') is not None: - self.model_name = m.get('ModelName') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('Origin') is not None: - self.origin = m.get('Origin') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('Provider') is not None: - self.provider = m.get('Provider') - if m.get('Query') is not None: - self.query = m.get('Query') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('Task') is not None: - self.task = m.get('Task') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('CategoryNames') is not None: + self.category_names = m.get('CategoryNames') + if m.get('ConfigKeys') is not None: + self.config_keys = m.get('ConfigKeys') + return self + + +class ListUserConfigsResponseBodyConfigs(TeaModel): + def __init__( + self, + category_name: str = None, + config_key: str = None, + config_value: str = None, + ): + self.category_name = category_name + self.config_key = config_key + self.config_value = config_value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + 
if _map is not None: + return _map + + result = dict() + if self.category_name is not None: + result['CategoryName'] = self.category_name + if self.config_key is not None: + result['ConfigKey'] = self.config_key + if self.config_value is not None: + result['ConfigValue'] = self.config_value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CategoryName') is not None: + self.category_name = m.get('CategoryName') + if m.get('ConfigKey') is not None: + self.config_key = m.get('ConfigKey') + if m.get('ConfigValue') is not None: + self.config_value = m.get('ConfigValue') return self -class ListModelsResponseBody(TeaModel): +class ListUserConfigsResponseBody(TeaModel): def __init__( self, - models: List[Model] = None, + configs: List[ListUserConfigsResponseBodyConfigs] = None, request_id: str = None, total_count: int = None, ): - self.models = models + self.configs = configs self.request_id = request_id self.total_count = total_count def validate(self): - if self.models: - for k in self.models: + if self.configs: + for k in self.configs: if k: k.validate() @@ -8790,10 +16175,10 @@ def to_map(self): return _map result = dict() - result['Models'] = [] - if self.models is not None: - for k in self.models: - result['Models'].append(k.to_map() if k else None) + result['Configs'] = [] + if self.configs is not None: + for k in self.configs: + result['Configs'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id if self.total_count is not None: @@ -8802,11 +16187,11 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - self.models = [] - if m.get('Models') is not None: - for k in m.get('Models'): - temp_model = Model() - self.models.append(temp_model.from_map(k)) + self.configs = [] + if m.get('Configs') is not None: + for k in m.get('Configs'): + temp_model = ListUserConfigsResponseBodyConfigs() + self.configs.append(temp_model.from_map(k)) if m.get('RequestId') is not 
None: self.request_id = m.get('RequestId') if m.get('TotalCount') is not None: @@ -8814,21 +16199,18 @@ def from_map(self, m: dict = None): return self -class ListModelsResponse(TeaModel): +class ListUserConfigsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListModelsResponseBody = None, + body: ListUserConfigsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -8853,19 +16235,25 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListModelsResponseBody() + temp_model = ListUserConfigsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListModuleConfigsRequest(TeaModel): +class ListUsersRequest(TeaModel): def __init__( self, - module_codes: str = None, - region: str = None, + account_types: str = None, + page_number: int = None, + page_size: int = None, + user_ids: str = None, + user_name: str = None, ): - self.module_codes = module_codes - self.region = region + self.account_types = account_types + self.page_number = page_number + self.page_size = page_size + self.user_ids = user_ids + self.user_name = user_name def validate(self): pass @@ -8876,29 +16264,43 @@ def to_map(self): return _map result = dict() - if self.module_codes is not None: - result['ModuleCodes'] = self.module_codes - if self.region is not None: - result['Region'] = self.region + if self.account_types is not None: + result['AccountTypes'] = self.account_types + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.user_ids is not None: + result['UserIds'] = 
self.user_ids + if self.user_name is not None: + result['UserName'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ModuleCodes') is not None: - self.module_codes = m.get('ModuleCodes') - if m.get('Region') is not None: - self.region = m.get('Region') + if m.get('AccountTypes') is not None: + self.account_types = m.get('AccountTypes') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('UserIds') is not None: + self.user_ids = m.get('UserIds') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') return self -class ListModuleConfigsResponseBodyModuleConfigsConfigs(TeaModel): +class ListUsersResponseBodyUsers(TeaModel): def __init__( self, - key: str = None, - value: str = None, + display_name: str = None, + user_id: str = None, + user_name: str = None, ): - self.key = key - self.value = value + self.display_name = display_name + self.user_id = user_id + self.user_name = user_name def validate(self): pass @@ -8909,35 +16311,39 @@ def to_map(self): return _map result = dict() - if self.key is not None: - result['Key'] = self.key - if self.value is not None: - result['Value'] = self.value + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_name is not None: + result['UserName'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Key') is not None: - self.key = m.get('Key') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') return self -class ListModuleConfigsResponseBodyModuleConfigs(TeaModel): +class 
ListUsersResponseBody(TeaModel): def __init__( self, - configs: List[ListModuleConfigsResponseBodyModuleConfigsConfigs] = None, - module_code: str = None, - region: str = None, + request_id: str = None, + total_count: int = None, + users: List[ListUsersResponseBodyUsers] = None, ): - self.configs = configs - self.module_code = module_code - self.region = region + self.request_id = request_id + self.total_count = total_count + self.users = users def validate(self): - if self.configs: - for k in self.configs: + if self.users: + for k in self.users: if k: k.validate() @@ -8947,46 +16353,44 @@ def to_map(self): return _map result = dict() - result['Configs'] = [] - if self.configs is not None: - for k in self.configs: - result['Configs'].append(k.to_map() if k else None) - if self.module_code is not None: - result['ModuleCode'] = self.module_code - if self.region is not None: - result['Region'] = self.region + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count + result['Users'] = [] + if self.users is not None: + for k in self.users: + result['Users'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - self.configs = [] - if m.get('Configs') is not None: - for k in m.get('Configs'): - temp_model = ListModuleConfigsResponseBodyModuleConfigsConfigs() - self.configs.append(temp_model.from_map(k)) - if m.get('ModuleCode') is not None: - self.module_code = m.get('ModuleCode') - if m.get('Region') is not None: - self.region = m.get('Region') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + self.users = [] + if m.get('Users') is not None: + for k in m.get('Users'): + temp_model = ListUsersResponseBodyUsers() + self.users.append(temp_model.from_map(k)) return self -class ListModuleConfigsResponseBody(TeaModel): +class 
ListUsersResponse(TeaModel): def __init__( self, - module_configs: List[ListModuleConfigsResponseBodyModuleConfigs] = None, - request_id: str = None, - total_count: int = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListUsersResponseBody = None, ): - self.module_configs = module_configs - self.request_id = request_id - self.total_count = total_count + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - if self.module_configs: - for k in self.module_configs: - if k: - k.validate() + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -8994,47 +16398,93 @@ def to_map(self): return _map result = dict() - result['ModuleConfigs'] = [] - if self.module_configs is not None: - for k in self.module_configs: - result['ModuleConfigs'].append(k.to_map() if k else None) - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - self.module_configs = [] - if m.get('ModuleConfigs') is not None: - for k in m.get('ModuleConfigs'): - temp_model = ListModuleConfigsResponseBodyModuleConfigs() - self.module_configs.append(temp_model.from_map(k)) - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListUsersResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class 
ListWorkspacePermissionsRequestPermissions(TeaModel): + def __init__( + self, + accessibility: str = None, + creator: str = None, + id: str = None, + permission_code: str = None, + resource: str = None, + ): + self.accessibility = accessibility + self.creator = creator + self.id = id + # This parameter is required. + self.permission_code = permission_code + self.resource = resource + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.creator is not None: + result['Creator'] = self.creator + if self.id is not None: + result['ID'] = self.id + if self.permission_code is not None: + result['PermissionCode'] = self.permission_code + if self.resource is not None: + result['Resource'] = self.resource + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('Creator') is not None: + self.creator = m.get('Creator') + if m.get('ID') is not None: + self.id = m.get('ID') + if m.get('PermissionCode') is not None: + self.permission_code = m.get('PermissionCode') + if m.get('Resource') is not None: + self.resource = m.get('Resource') return self -class ListModuleConfigsResponse(TeaModel): +class ListWorkspacePermissionsRequest(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: ListModuleConfigsResponseBody = None, + options: Dict[str, Any] = None, + permissions: List[ListWorkspacePermissionsRequestPermissions] = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.options = options + # This parameter is required. 
+ self.permissions = permissions def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.permissions: + for k in self.permissions: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -9042,46 +16492,34 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.options is not None: + result['Options'] = self.options + result['Permissions'] = [] + if self.permissions is not None: + for k in self.permissions: + result['Permissions'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = ListModuleConfigsResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('Options') is not None: + self.options = m.get('Options') + self.permissions = [] + if m.get('Permissions') is not None: + for k in m.get('Permissions'): + temp_model = ListWorkspacePermissionsRequestPermissions() + self.permissions.append(temp_model.from_map(k)) return self -class ListOperationLogsRequest(TeaModel): +class ListWorkspacePermissionsResponseBodyPermissionsPermissionRules(TeaModel): def __init__( self, - entity_status: str = None, - entity_types: str = None, - operation_status: str = None, - operations: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, - sort_by: str = None, + accessibility: str = None, + entity_access_type: str = None, ): - self.entity_status = entity_status - self.entity_types = entity_types - self.operation_status 
= operation_status - self.operations = operations - self.order = order - self.page_number = page_number - self.page_size = page_size - self.sort_by = sort_by + self.accessibility = accessibility + self.entity_access_type = entity_access_type def validate(self): pass @@ -9092,66 +16530,41 @@ def to_map(self): return _map result = dict() - if self.entity_status is not None: - result['EntityStatus'] = self.entity_status - if self.entity_types is not None: - result['EntityTypes'] = self.entity_types - if self.operation_status is not None: - result['OperationStatus'] = self.operation_status - if self.operations is not None: - result['Operations'] = self.operations - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.sort_by is not None: - result['SortBy'] = self.sort_by + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.entity_access_type is not None: + result['EntityAccessType'] = self.entity_access_type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('EntityStatus') is not None: - self.entity_status = m.get('EntityStatus') - if m.get('EntityTypes') is not None: - self.entity_types = m.get('EntityTypes') - if m.get('OperationStatus') is not None: - self.operation_status = m.get('OperationStatus') - if m.get('Operations') is not None: - self.operations = m.get('Operations') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('EntityAccessType') is not None: + self.entity_access_type = m.get('EntityAccessType') 
return self -class ListOperationLogsResponseBodyLogs(TeaModel): +class ListWorkspacePermissionsResponseBodyPermissions(TeaModel): def __init__( self, - entity_id: str = None, - entity_type: str = None, - gmt_create_time: str = None, + code: str = None, + id: str = None, message: str = None, - operation: str = None, - operation_status: str = None, - operator: str = None, + permission_code: str = None, + permission_rules: List[ListWorkspacePermissionsResponseBodyPermissionsPermissionRules] = None, ): - self.entity_id = entity_id - self.entity_type = entity_type - self.gmt_create_time = gmt_create_time + self.code = code + self.id = id self.message = message - self.operation = operation - self.operation_status = operation_status - self.operator = operator + self.permission_code = permission_code + self.permission_rules = permission_rules def validate(self): - pass + if self.permission_rules: + for k in self.permission_rules: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -9159,55 +16572,50 @@ def to_map(self): return _map result = dict() - if self.entity_id is not None: - result['EntityId'] = self.entity_id - if self.entity_type is not None: - result['EntityType'] = self.entity_type - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time + if self.code is not None: + result['Code'] = self.code + if self.id is not None: + result['ID'] = self.id if self.message is not None: result['Message'] = self.message - if self.operation is not None: - result['Operation'] = self.operation - if self.operation_status is not None: - result['OperationStatus'] = self.operation_status - if self.operator is not None: - result['Operator'] = self.operator + if self.permission_code is not None: + result['PermissionCode'] = self.permission_code + result['PermissionRules'] = [] + if self.permission_rules is not None: + for k in self.permission_rules: + result['PermissionRules'].append(k.to_map() if k else None) return result def 
from_map(self, m: dict = None): m = m or dict() - if m.get('EntityId') is not None: - self.entity_id = m.get('EntityId') - if m.get('EntityType') is not None: - self.entity_type = m.get('EntityType') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('ID') is not None: + self.id = m.get('ID') if m.get('Message') is not None: self.message = m.get('Message') - if m.get('Operation') is not None: - self.operation = m.get('Operation') - if m.get('OperationStatus') is not None: - self.operation_status = m.get('OperationStatus') - if m.get('Operator') is not None: - self.operator = m.get('Operator') + if m.get('PermissionCode') is not None: + self.permission_code = m.get('PermissionCode') + self.permission_rules = [] + if m.get('PermissionRules') is not None: + for k in m.get('PermissionRules'): + temp_model = ListWorkspacePermissionsResponseBodyPermissionsPermissionRules() + self.permission_rules.append(temp_model.from_map(k)) return self -class ListOperationLogsResponseBody(TeaModel): +class ListWorkspacePermissionsResponseBody(TeaModel): def __init__( self, - logs: List[ListOperationLogsResponseBodyLogs] = None, + permissions: List[ListWorkspacePermissionsResponseBodyPermissions] = None, request_id: str = None, - total_count: int = None, ): - self.logs = logs + self.permissions = permissions self.request_id = request_id - self.total_count = total_count def validate(self): - if self.logs: - for k in self.logs: + if self.permissions: + for k in self.permissions: if k: k.validate() @@ -9217,45 +16625,38 @@ def to_map(self): return _map result = dict() - result['Logs'] = [] - if self.logs is not None: - for k in self.logs: - result['Logs'].append(k.to_map() if k else None) + result['Permissions'] = [] + if self.permissions is not None: + for k in self.permissions: + result['Permissions'].append(k.to_map() if k else None) if self.request_id is not None: 
result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - self.logs = [] - if m.get('Logs') is not None: - for k in m.get('Logs'): - temp_model = ListOperationLogsResponseBodyLogs() - self.logs.append(temp_model.from_map(k)) + self.permissions = [] + if m.get('Permissions') is not None: + for k in m.get('Permissions'): + temp_model = ListWorkspacePermissionsResponseBodyPermissions() + self.permissions.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') return self -class ListOperationLogsResponse(TeaModel): +class ListWorkspacePermissionsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListOperationLogsResponseBody = None, + body: ListWorkspacePermissionsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -9280,12 +16681,87 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListOperationLogsResponseBody() + temp_model = ListWorkspacePermissionsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListPermissionsResponseBodyPermissionsPermissionRules(TeaModel): +class ListWorkspaceRolesRequest(TeaModel): + def __init__( + self, + order: str = None, + page_number: int = None, + page_size: int = None, + role_ids: str = None, + role_name: str = None, + role_type: str = None, + sort_by: str = None, + status: str = None, + verbose_fields: str = None, + ): + self.order = order + 
self.page_number = page_number + self.page_size = page_size + self.role_ids = role_ids + self.role_name = role_name + self.role_type = role_type + self.sort_by = sort_by + self.status = status + self.verbose_fields = verbose_fields + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.role_ids is not None: + result['RoleIds'] = self.role_ids + if self.role_name is not None: + result['RoleName'] = self.role_name + if self.role_type is not None: + result['RoleType'] = self.role_type + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.status is not None: + result['Status'] = self.status + if self.verbose_fields is not None: + result['VerboseFields'] = self.verbose_fields + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('RoleIds') is not None: + self.role_ids = m.get('RoleIds') + if m.get('RoleName') is not None: + self.role_name = m.get('RoleName') + if m.get('RoleType') is not None: + self.role_type = m.get('RoleType') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('VerboseFields') is not None: + self.verbose_fields = m.get('VerboseFields') + return self + + +class ListWorkspaceRolesResponseBodyRolesModulePermissionsPermissionsPermissionRules(TeaModel): def __init__( self, accessibility: str = None, @@ -9318,13 +16794,14 @@ def from_map(self, m: dict = None): return self -class 
ListPermissionsResponseBodyPermissions(TeaModel): +class ListWorkspaceRolesResponseBodyRolesModulePermissionsPermissions(TeaModel): def __init__( self, - permission_code: str = None, - permission_rules: List[ListPermissionsResponseBodyPermissionsPermissionRules] = None, + permission_codes: List[str] = None, + permission_rules: List[ + ListWorkspaceRolesResponseBodyRolesModulePermissionsPermissionsPermissionRules] = None, ): - self.permission_code = permission_code + self.permission_codes = permission_codes self.permission_rules = permission_rules def validate(self): @@ -9339,8 +16816,8 @@ def to_map(self): return _map result = dict() - if self.permission_code is not None: - result['PermissionCode'] = self.permission_code + if self.permission_codes is not None: + result['PermissionCodes'] = self.permission_codes result['PermissionRules'] = [] if self.permission_rules is not None: for k in self.permission_rules: @@ -9349,26 +16826,26 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - if m.get('PermissionCode') is not None: - self.permission_code = m.get('PermissionCode') + if m.get('PermissionCodes') is not None: + self.permission_codes = m.get('PermissionCodes') self.permission_rules = [] if m.get('PermissionRules') is not None: for k in m.get('PermissionRules'): - temp_model = ListPermissionsResponseBodyPermissionsPermissionRules() + temp_model = ListWorkspaceRolesResponseBodyRolesModulePermissionsPermissionsPermissionRules() self.permission_rules.append(temp_model.from_map(k)) return self -class ListPermissionsResponseBody(TeaModel): +class ListWorkspaceRolesResponseBodyRolesModulePermissions(TeaModel): def __init__( self, - permissions: List[ListPermissionsResponseBodyPermissions] = None, - request_id: str = None, - total_count: int = None, + module_name: str = None, + permission_type: str = None, + permissions: List[ListWorkspaceRolesResponseBodyRolesModulePermissionsPermissions] = None, ): + self.module_name = module_name + 
self.permission_type = permission_type self.permissions = permissions - self.request_id = request_id - self.total_count = total_count def validate(self): if self.permissions: @@ -9382,118 +16859,52 @@ def to_map(self): return _map result = dict() + if self.module_name is not None: + result['ModuleName'] = self.module_name + if self.permission_type is not None: + result['PermissionType'] = self.permission_type result['Permissions'] = [] if self.permissions is not None: for k in self.permissions: result['Permissions'].append(k.to_map() if k else None) - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() + if m.get('ModuleName') is not None: + self.module_name = m.get('ModuleName') + if m.get('PermissionType') is not None: + self.permission_type = m.get('PermissionType') self.permissions = [] if m.get('Permissions') is not None: for k in m.get('Permissions'): - temp_model = ListPermissionsResponseBodyPermissions() + temp_model = ListWorkspaceRolesResponseBodyRolesModulePermissionsPermissions() self.permissions.append(temp_model.from_map(k)) - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') - return self - - -class ListPermissionsResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: ListPermissionsResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - 
result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = ListPermissionsResponseBody() - self.body = temp_model.from_map(m['body']) - return self - - -class ListProductAuthorizationsRequest(TeaModel): - def __init__( - self, - ram_role_names: str = None, - ): - self.ram_role_names = ram_role_names - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.ram_role_names is not None: - result['RamRoleNames'] = self.ram_role_names - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('RamRoleNames') is not None: - self.ram_role_names = m.get('RamRoleNames') return self -class ListProductAuthorizationsResponseBodyAuthorizationDetails(TeaModel): +class ListWorkspaceRolesResponseBodyRoles(TeaModel): def __init__( self, - authorization_url: str = None, - is_authorized: bool = None, - ram_role_arn: str = None, - ram_role_name: str = None, - ram_role_type: str = None, + creator: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + module_permissions: List[ListWorkspaceRolesResponseBodyRolesModulePermissions] = None, + role_id: str = None, + role_name: str = None, ): - self.authorization_url = authorization_url - self.is_authorized = is_authorized - self.ram_role_arn = ram_role_arn - self.ram_role_name = ram_role_name - self.ram_role_type = ram_role_type + self.creator = creator + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.module_permissions = module_permissions + self.role_id = role_id + self.role_name = 
role_name def validate(self): - pass + if self.module_permissions: + for k in self.module_permissions: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -9501,47 +16912,56 @@ def to_map(self): return _map result = dict() - if self.authorization_url is not None: - result['AuthorizationUrl'] = self.authorization_url - if self.is_authorized is not None: - result['IsAuthorized'] = self.is_authorized - if self.ram_role_arn is not None: - result['RamRoleARN'] = self.ram_role_arn - if self.ram_role_name is not None: - result['RamRoleName'] = self.ram_role_name - if self.ram_role_type is not None: - result['RamRoleType'] = self.ram_role_type + if self.creator is not None: + result['Creator'] = self.creator + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['ModulePermissions'] = [] + if self.module_permissions is not None: + for k in self.module_permissions: + result['ModulePermissions'].append(k.to_map() if k else None) + if self.role_id is not None: + result['RoleId'] = self.role_id + if self.role_name is not None: + result['RoleName'] = self.role_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AuthorizationUrl') is not None: - self.authorization_url = m.get('AuthorizationUrl') - if m.get('IsAuthorized') is not None: - self.is_authorized = m.get('IsAuthorized') - if m.get('RamRoleARN') is not None: - self.ram_role_arn = m.get('RamRoleARN') - if m.get('RamRoleName') is not None: - self.ram_role_name = m.get('RamRoleName') - if m.get('RamRoleType') is not None: - self.ram_role_type = m.get('RamRoleType') + if m.get('Creator') is not None: + self.creator = m.get('Creator') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.module_permissions = [] 
+ if m.get('ModulePermissions') is not None: + for k in m.get('ModulePermissions'): + temp_model = ListWorkspaceRolesResponseBodyRolesModulePermissions() + self.module_permissions.append(temp_model.from_map(k)) + if m.get('RoleId') is not None: + self.role_id = m.get('RoleId') + if m.get('RoleName') is not None: + self.role_name = m.get('RoleName') return self -class ListProductAuthorizationsResponseBody(TeaModel): +class ListWorkspaceRolesResponseBody(TeaModel): def __init__( self, - authorization_details: List[ListProductAuthorizationsResponseBodyAuthorizationDetails] = None, - authorization_url: str = None, request_id: str = None, + roles: List[ListWorkspaceRolesResponseBodyRoles] = None, + total_count: int = None, ): - self.authorization_details = authorization_details - self.authorization_url = authorization_url self.request_id = request_id + self.roles = roles + self.total_count = total_count def validate(self): - if self.authorization_details: - for k in self.authorization_details: + if self.roles: + for k in self.roles: if k: k.validate() @@ -9551,45 +16971,42 @@ def to_map(self): return _map result = dict() - result['AuthorizationDetails'] = [] - if self.authorization_details is not None: - for k in self.authorization_details: - result['AuthorizationDetails'].append(k.to_map() if k else None) - if self.authorization_url is not None: - result['AuthorizationUrl'] = self.authorization_url if self.request_id is not None: result['RequestId'] = self.request_id + result['Roles'] = [] + if self.roles is not None: + for k in self.roles: + result['Roles'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - self.authorization_details = [] - if m.get('AuthorizationDetails') is not None: - for k in m.get('AuthorizationDetails'): - temp_model = ListProductAuthorizationsResponseBodyAuthorizationDetails() - 
self.authorization_details.append(temp_model.from_map(k)) - if m.get('AuthorizationUrl') is not None: - self.authorization_url = m.get('AuthorizationUrl') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + self.roles = [] + if m.get('Roles') is not None: + for k in m.get('Roles'): + temp_model = ListWorkspaceRolesResponseBodyRoles() + self.roles.append(temp_model.from_map(k)) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class ListProductAuthorizationsResponse(TeaModel): +class ListWorkspaceRolesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListProductAuthorizationsResponseBody = None, + body: ListWorkspaceRolesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -9614,111 +17031,46 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListProductAuthorizationsResponseBody() + temp_model = ListWorkspaceRolesResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListProductsRequest(TeaModel): - def __init__( - self, - product_codes: str = None, - service_codes: str = None, - verbose: bool = None, - ): - self.product_codes = product_codes - self.service_codes = service_codes - self.verbose = verbose - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.product_codes is not None: - result['ProductCodes'] = self.product_codes - if self.service_codes is not None: - result['ServiceCodes'] = self.service_codes - if self.verbose is not None: - result['Verbose'] = self.verbose - return 
result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('ProductCodes') is not None: - self.product_codes = m.get('ProductCodes') - if m.get('ServiceCodes') is not None: - self.service_codes = m.get('ServiceCodes') - if m.get('Verbose') is not None: - self.verbose = m.get('Verbose') - return self - - -class ListProductsResponseBodyProducts(TeaModel): +class ListWorkspaceUsersRequest(TeaModel): def __init__( self, - has_permission_to_purchase: bool = None, - is_purchased: bool = None, - product_code: str = None, - product_instance_id: str = None, - purchase_url: str = None, + user_name: str = None, ): - self.has_permission_to_purchase = has_permission_to_purchase - self.is_purchased = is_purchased - self.product_code = product_code - self.product_instance_id = product_instance_id - self.purchase_url = purchase_url + self.user_name = user_name def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.has_permission_to_purchase is not None: - result['HasPermissionToPurchase'] = self.has_permission_to_purchase - if self.is_purchased is not None: - result['IsPurchased'] = self.is_purchased - if self.product_code is not None: - result['ProductCode'] = self.product_code - if self.product_instance_id is not None: - result['ProductInstanceId'] = self.product_instance_id - if self.purchase_url is not None: - result['PurchaseUrl'] = self.purchase_url + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.user_name is not None: + result['UserName'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('HasPermissionToPurchase') is not None: - self.has_permission_to_purchase = m.get('HasPermissionToPurchase') - if m.get('IsPurchased') is not None: - self.is_purchased = m.get('IsPurchased') - if m.get('ProductCode') is not None: - self.product_code = m.get('ProductCode') 
- if m.get('ProductInstanceId') is not None: - self.product_instance_id = m.get('ProductInstanceId') - if m.get('PurchaseUrl') is not None: - self.purchase_url = m.get('PurchaseUrl') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') return self -class ListProductsResponseBodyServices(TeaModel): +class ListWorkspaceUsersResponseBodyUsers(TeaModel): def __init__( self, - is_open: bool = None, - open_url: str = None, - service_code: str = None, + user_id: str = None, + user_name: str = None, ): - self.is_open = is_open - self.open_url = open_url - self.service_code = service_code + self.user_id = user_id + self.user_name = user_name def validate(self): pass @@ -9729,43 +17081,35 @@ def to_map(self): return _map result = dict() - if self.is_open is not None: - result['IsOpen'] = self.is_open - if self.open_url is not None: - result['OpenUrl'] = self.open_url - if self.service_code is not None: - result['ServiceCode'] = self.service_code + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_name is not None: + result['UserName'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('IsOpen') is not None: - self.is_open = m.get('IsOpen') - if m.get('OpenUrl') is not None: - self.open_url = m.get('OpenUrl') - if m.get('ServiceCode') is not None: - self.service_code = m.get('ServiceCode') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') return self -class ListProductsResponseBody(TeaModel): +class ListWorkspaceUsersResponseBody(TeaModel): def __init__( self, - products: List[ListProductsResponseBodyProducts] = None, request_id: str = None, - services: List[ListProductsResponseBodyServices] = None, + total_count: int = None, + users: List[ListWorkspaceUsersResponseBodyUsers] = None, ): - self.products = products self.request_id = request_id - self.services = services + self.total_count = 
total_count + self.users = users def validate(self): - if self.products: - for k in self.products: - if k: - k.validate() - if self.services: - for k in self.services: + if self.users: + for k in self.users: if k: k.validate() @@ -9775,50 +17119,42 @@ def to_map(self): return _map result = dict() - result['Products'] = [] - if self.products is not None: - for k in self.products: - result['Products'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - result['Services'] = [] - if self.services is not None: - for k in self.services: - result['Services'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count + result['Users'] = [] + if self.users is not None: + for k in self.users: + result['Users'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - self.products = [] - if m.get('Products') is not None: - for k in m.get('Products'): - temp_model = ListProductsResponseBodyProducts() - self.products.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - self.services = [] - if m.get('Services') is not None: - for k in m.get('Services'): - temp_model = ListProductsResponseBodyServices() - self.services.append(temp_model.from_map(k)) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + self.users = [] + if m.get('Users') is not None: + for k in m.get('Users'): + temp_model = ListWorkspaceUsersResponseBodyUsers() + self.users.append(temp_model.from_map(k)) return self -class ListProductsResponse(TeaModel): +class ListWorkspaceUsersResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListProductsResponseBody = None, + body: ListWorkspaceUsersResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - 
self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -9843,48 +17179,37 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListProductsResponseBody() + temp_model = ListWorkspaceUsersResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListQuotasRequest(TeaModel): - def __init__( - self, - name: str = None, - ): - self.name = name - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.name is not None: - result['Name'] = self.name - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Name') is not None: - self.name = m.get('Name') - return self - - -class ListQuotasResponseBodyQuotasSpecs(TeaModel): +class ListWorkspacesRequest(TeaModel): def __init__( self, - name: str = None, - type: str = None, - value: str = None, + fields: str = None, + module_list: str = None, + option: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + status: str = None, + verbose: bool = None, + workspace_ids: str = None, + workspace_name: str = None, ): - self.name = name - self.type = type - self.value = value + self.fields = fields + self.module_list = module_list + self.option = option + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + self.status = status + self.verbose = verbose + self.workspace_ids = workspace_ids + self.workspace_name = workspace_name def validate(self): pass @@ -9895,49 +17220,86 @@ def to_map(self): return _map result = dict() - if self.name is not None: - result['Name'] = self.name - if self.type is not None: - result['Type'] = self.type - if self.value is not 
None: - result['Value'] = self.value + if self.fields is not None: + result['Fields'] = self.fields + if self.module_list is not None: + result['ModuleList'] = self.module_list + if self.option is not None: + result['Option'] = self.option + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.status is not None: + result['Status'] = self.status + if self.verbose is not None: + result['Verbose'] = self.verbose + if self.workspace_ids is not None: + result['WorkspaceIds'] = self.workspace_ids + if self.workspace_name is not None: + result['WorkspaceName'] = self.workspace_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Type') is not None: - self.type = m.get('Type') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('Fields') is not None: + self.fields = m.get('Fields') + if m.get('ModuleList') is not None: + self.module_list = m.get('ModuleList') + if m.get('Option') is not None: + self.option = m.get('Option') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') + if m.get('WorkspaceIds') is not None: + self.workspace_ids = m.get('WorkspaceIds') + if m.get('WorkspaceName') is not None: + self.workspace_name = m.get('WorkspaceName') return self -class ListQuotasResponseBodyQuotas(TeaModel): +class ListWorkspacesResponseBodyWorkspaces(TeaModel): def __init__( self, - 
display_name: str = None, - id: str = None, - mode: str = None, - name: str = None, - product_code: str = None, - quota_type: str = None, - specs: List[ListQuotasResponseBodyQuotasSpecs] = None, + admin_names: List[str] = None, + creator: str = None, + description: str = None, + env_types: List[str] = None, + extra_infos: Dict[str, Any] = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + is_default: bool = None, + status: str = None, + workspace_id: str = None, + workspace_name: str = None, ): - self.display_name = display_name - self.id = id - self.mode = mode - self.name = name - self.product_code = product_code - self.quota_type = quota_type - self.specs = specs + self.admin_names = admin_names + self.creator = creator + self.description = description + self.env_types = env_types + self.extra_infos = extra_infos + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.is_default = is_default + self.status = status + self.workspace_id = workspace_id + self.workspace_name = workspace_name def validate(self): - if self.specs: - for k in self.specs: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -9945,60 +17307,73 @@ def to_map(self): return _map result = dict() - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.id is not None: - result['Id'] = self.id - if self.mode is not None: - result['Mode'] = self.mode - if self.name is not None: - result['Name'] = self.name - if self.product_code is not None: - result['ProductCode'] = self.product_code - if self.quota_type is not None: - result['QuotaType'] = self.quota_type - result['Specs'] = [] - if self.specs is not None: - for k in self.specs: - result['Specs'].append(k.to_map() if k else None) + if self.admin_names is not None: + result['AdminNames'] = self.admin_names + if self.creator is not None: + result['Creator'] = self.creator + if self.description is not None: + result['Description'] = 
self.description + if self.env_types is not None: + result['EnvTypes'] = self.env_types + if self.extra_infos is not None: + result['ExtraInfos'] = self.extra_infos + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.is_default is not None: + result['IsDefault'] = self.is_default + if self.status is not None: + result['Status'] = self.status + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + if self.workspace_name is not None: + result['WorkspaceName'] = self.workspace_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('Id') is not None: - self.id = m.get('Id') - if m.get('Mode') is not None: - self.mode = m.get('Mode') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('ProductCode') is not None: - self.product_code = m.get('ProductCode') - if m.get('QuotaType') is not None: - self.quota_type = m.get('QuotaType') - self.specs = [] - if m.get('Specs') is not None: - for k in m.get('Specs'): - temp_model = ListQuotasResponseBodyQuotasSpecs() - self.specs.append(temp_model.from_map(k)) + if m.get('AdminNames') is not None: + self.admin_names = m.get('AdminNames') + if m.get('Creator') is not None: + self.creator = m.get('Creator') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('EnvTypes') is not None: + self.env_types = m.get('EnvTypes') + if m.get('ExtraInfos') is not None: + self.extra_infos = m.get('ExtraInfos') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('IsDefault') is not None: + self.is_default = m.get('IsDefault') + if m.get('Status') is not None: + self.status = 
m.get('Status') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + if m.get('WorkspaceName') is not None: + self.workspace_name = m.get('WorkspaceName') return self -class ListQuotasResponseBody(TeaModel): +class ListWorkspacesResponseBody(TeaModel): def __init__( self, - quotas: List[ListQuotasResponseBodyQuotas] = None, request_id: str = None, + resource_limits: Dict[str, Any] = None, total_count: int = None, + workspaces: List[ListWorkspacesResponseBodyWorkspaces] = None, ): - self.quotas = quotas self.request_id = request_id + self.resource_limits = resource_limits self.total_count = total_count + self.workspaces = workspaces def validate(self): - if self.quotas: - for k in self.quotas: + if self.workspaces: + for k in self.workspaces: if k: k.validate() @@ -10008,45 +17383,46 @@ def to_map(self): return _map result = dict() - result['Quotas'] = [] - if self.quotas is not None: - for k in self.quotas: - result['Quotas'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.resource_limits is not None: + result['ResourceLimits'] = self.resource_limits if self.total_count is not None: result['TotalCount'] = self.total_count + result['Workspaces'] = [] + if self.workspaces is not None: + for k in self.workspaces: + result['Workspaces'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - self.quotas = [] - if m.get('Quotas') is not None: - for k in m.get('Quotas'): - temp_model = ListQuotasResponseBodyQuotas() - self.quotas.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('ResourceLimits') is not None: + self.resource_limits = m.get('ResourceLimits') if m.get('TotalCount') is not None: self.total_count = m.get('TotalCount') + self.workspaces = [] + if m.get('Workspaces') is not None: + for k in m.get('Workspaces'): + temp_model = 
ListWorkspacesResponseBodyWorkspaces() + self.workspaces.append(temp_model.from_map(k)) return self -class ListQuotasResponse(TeaModel): +class ListWorkspacesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListQuotasResponseBody = None, + body: ListWorkspacesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -10071,32 +17447,24 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListQuotasResponseBody() + temp_model = ListWorkspacesResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListResourcesRequest(TeaModel): +class MigrateDatasetsRequest(TeaModel): def __init__( self, - group_name: str = None, - option: str = None, - page_number: int = None, - page_size: int = None, - product_types: str = None, - resource_name: str = None, - resource_types: str = None, - verbose: bool = None, + count: int = None, + dataset_id: str = None, + if_force: bool = None, + owner_id: str = None, workspace_id: str = None, ): - self.group_name = group_name - self.option = option - self.page_number = page_number - self.page_size = page_size - self.product_types = product_types - self.resource_name = resource_name - self.resource_types = resource_types - self.verbose = verbose + self.count = count + self.dataset_id = dataset_id + self.if_force = if_force + self.owner_id = owner_id self.workspace_id = workspace_id def validate(self): @@ -10108,147 +17476,50 @@ def to_map(self): return _map result = dict() - if self.group_name is not None: - result['GroupName'] = self.group_name - if self.option is not None: - result['Option'] = self.option - if 
self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.product_types is not None: - result['ProductTypes'] = self.product_types - if self.resource_name is not None: - result['ResourceName'] = self.resource_name - if self.resource_types is not None: - result['ResourceTypes'] = self.resource_types - if self.verbose is not None: - result['Verbose'] = self.verbose - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('GroupName') is not None: - self.group_name = m.get('GroupName') - if m.get('Option') is not None: - self.option = m.get('Option') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('ProductTypes') is not None: - self.product_types = m.get('ProductTypes') - if m.get('ResourceName') is not None: - self.resource_name = m.get('ResourceName') - if m.get('ResourceTypes') is not None: - self.resource_types = m.get('ResourceTypes') - if m.get('Verbose') is not None: - self.verbose = m.get('Verbose') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - return self - - -class ListResourcesResponseBodyResourcesEncryption(TeaModel): - def __init__( - self, - algorithm: str = None, - enabled: bool = None, - key: str = None, - ): - self.algorithm = algorithm - self.enabled = enabled - self.key = key - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.algorithm is not None: - result['Algorithm'] = self.algorithm - if self.enabled is not None: - result['Enabled'] = self.enabled - if self.key is not None: - result['Key'] = self.key - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Algorithm') is not 
None: - self.algorithm = m.get('Algorithm') - if m.get('Enabled') is not None: - self.enabled = m.get('Enabled') - if m.get('Key') is not None: - self.key = m.get('Key') - return self - - -class ListResourcesResponseBodyResourcesQuotasSpecs(TeaModel): - def __init__( - self, - name: str = None, - value: str = None, - ): - self.name = name - self.value = value - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.name is not None: - result['Name'] = self.name - if self.value is not None: - result['Value'] = self.value + if self.count is not None: + result['Count'] = self.count + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.if_force is not None: + result['IfForce'] = self.if_force + if self.owner_id is not None: + result['OwnerId'] = self.owner_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('Count') is not None: + self.count = m.get('Count') + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('IfForce') is not None: + self.if_force = m.get('IfForce') + if m.get('OwnerId') is not None: + self.owner_id = m.get('OwnerId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class ListResourcesResponseBodyResourcesQuotas(TeaModel): +class MigrateDatasetsResponseBody(TeaModel): def __init__( self, - card_type: str = None, - display_name: str = None, - id: str = None, - mode: str = None, - name: str = None, - product_code: str = None, - quota_type: str = None, - specs: List[ListResourcesResponseBodyResourcesQuotasSpecs] = None, + failed_count: int = None, + migrated_count: int = None, + request_id: str = None, + successful_count: int = 
None, + total_count: int = None, ): - self.card_type = card_type - self.display_name = display_name - self.id = id - self.mode = mode - self.name = name - self.product_code = product_code - self.quota_type = quota_type - self.specs = specs + self.failed_count = failed_count + self.migrated_count = migrated_count + self.request_id = request_id + self.successful_count = successful_count + self.total_count = total_count def validate(self): - if self.specs: - for k in self.specs: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -10256,92 +17527,47 @@ def to_map(self): return _map result = dict() - if self.card_type is not None: - result['CardType'] = self.card_type - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.id is not None: - result['Id'] = self.id - if self.mode is not None: - result['Mode'] = self.mode - if self.name is not None: - result['Name'] = self.name - if self.product_code is not None: - result['ProductCode'] = self.product_code - if self.quota_type is not None: - result['QuotaType'] = self.quota_type - result['Specs'] = [] - if self.specs is not None: - for k in self.specs: - result['Specs'].append(k.to_map() if k else None) + if self.failed_count is not None: + result['FailedCount'] = self.failed_count + if self.migrated_count is not None: + result['MigratedCount'] = self.migrated_count + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.successful_count is not None: + result['SuccessfulCount'] = self.successful_count + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CardType') is not None: - self.card_type = m.get('CardType') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('Id') is not None: - self.id = m.get('Id') - if m.get('Mode') is not None: - self.mode = m.get('Mode') - if m.get('Name') is not None: - 
self.name = m.get('Name') - if m.get('ProductCode') is not None: - self.product_code = m.get('ProductCode') - if m.get('QuotaType') is not None: - self.quota_type = m.get('QuotaType') - self.specs = [] - if m.get('Specs') is not None: - for k in m.get('Specs'): - temp_model = ListResourcesResponseBodyResourcesQuotasSpecs() - self.specs.append(temp_model.from_map(k)) + if m.get('FailedCount') is not None: + self.failed_count = m.get('FailedCount') + if m.get('MigratedCount') is not None: + self.migrated_count = m.get('MigratedCount') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('SuccessfulCount') is not None: + self.successful_count = m.get('SuccessfulCount') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class ListResourcesResponseBodyResources(TeaModel): +class MigrateDatasetsResponse(TeaModel): def __init__( self, - encryption: ListResourcesResponseBodyResourcesEncryption = None, - env_type: str = None, - executor: Dict[str, ResourcesExecutorValue] = None, - gmt_create_time: str = None, - group_name: str = None, - id: str = None, - is_default: bool = None, - name: str = None, - product_type: str = None, - quotas: List[ListResourcesResponseBodyResourcesQuotas] = None, - resource_type: str = None, - spec: Dict[str, Any] = None, - workspace_id: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: MigrateDatasetsResponseBody = None, ): - self.encryption = encryption - self.env_type = env_type - self.executor = executor - self.gmt_create_time = gmt_create_time - self.group_name = group_name - self.id = id - self.is_default = is_default - self.name = name - self.product_type = product_type - self.quotas = quotas - self.resource_type = resource_type - self.spec = spec - self.workspace_id = workspace_id + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - if self.encryption: - self.encryption.validate() - if 
self.executor: - for v in self.executor.values(): - if v: - v.validate() - if self.quotas: - for k in self.quotas: - if k: - k.validate() + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -10349,92 +17575,37 @@ def to_map(self): return _map result = dict() - if self.encryption is not None: - result['Encryption'] = self.encryption.to_map() - if self.env_type is not None: - result['EnvType'] = self.env_type - result['Executor'] = {} - if self.executor is not None: - for k, v in self.executor.items(): - result['Executor'][k] = v.to_map() - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.group_name is not None: - result['GroupName'] = self.group_name - if self.id is not None: - result['Id'] = self.id - if self.is_default is not None: - result['IsDefault'] = self.is_default - if self.name is not None: - result['Name'] = self.name - if self.product_type is not None: - result['ProductType'] = self.product_type - result['Quotas'] = [] - if self.quotas is not None: - for k in self.quotas: - result['Quotas'].append(k.to_map() if k else None) - if self.resource_type is not None: - result['ResourceType'] = self.resource_type - if self.spec is not None: - result['Spec'] = self.spec - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Encryption') is not None: - temp_model = ListResourcesResponseBodyResourcesEncryption() - self.encryption = temp_model.from_map(m['Encryption']) - if m.get('EnvType') is not None: - self.env_type = m.get('EnvType') - self.executor = {} - if m.get('Executor') is not None: - for k, v in m.get('Executor').items(): - temp_model = ResourcesExecutorValue() - 
self.executor[k] = temp_model.from_map(v) - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GroupName') is not None: - self.group_name = m.get('GroupName') - if m.get('Id') is not None: - self.id = m.get('Id') - if m.get('IsDefault') is not None: - self.is_default = m.get('IsDefault') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('ProductType') is not None: - self.product_type = m.get('ProductType') - self.quotas = [] - if m.get('Quotas') is not None: - for k in m.get('Quotas'): - temp_model = ListResourcesResponseBodyResourcesQuotas() - self.quotas.append(temp_model.from_map(k)) - if m.get('ResourceType') is not None: - self.resource_type = m.get('ResourceType') - if m.get('Spec') is not None: - self.spec = m.get('Spec') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = MigrateDatasetsResponseBody() + self.body = temp_model.from_map(m['body']) return self -class ListResourcesResponseBody(TeaModel): +class PublishCodeSourceResponseBody(TeaModel): def __init__( self, + code_source_id: str = None, request_id: str = None, - resources: List[ListResourcesResponseBodyResources] = None, - total_count: int = None, ): + self.code_source_id = code_source_id self.request_id = request_id - self.resources = resources - self.total_count = total_count def validate(self): - if self.resources: - for k in self.resources: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -10442,45 +17613,33 @@ def to_map(self): return _map result = dict() + if self.code_source_id is not None: + result['CodeSourceId'] = self.code_source_id if self.request_id is not None: result['RequestId'] = self.request_id - result['Resources'] = [] - if self.resources is not None: - for k in 
self.resources: - result['Resources'].append(k.to_map() if k else None) - if self.total_count is not None: - result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() + if m.get('CodeSourceId') is not None: + self.code_source_id = m.get('CodeSourceId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - self.resources = [] - if m.get('Resources') is not None: - for k in m.get('Resources'): - temp_model = ListResourcesResponseBodyResources() - self.resources.append(temp_model.from_map(k)) - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') return self -class ListResourcesResponse(TeaModel): +class PublishCodeSourceResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListResourcesResponseBody = None, + body: PublishCodeSourceResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -10505,25 +17664,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListResourcesResponseBody() + temp_model = PublishCodeSourceResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListUsersRequest(TeaModel): +class PublishDatasetResponseBody(TeaModel): def __init__( self, - account_types: str = None, - page_number: int = None, - page_size: int = None, - user_ids: str = None, - user_name: str = None, + request_id: str = None, ): - self.account_types = account_types - self.page_number = page_number - self.page_size = page_size - self.user_ids = user_ids - self.user_name = user_name + self.request_id = request_id def validate(self): pass @@ -10534,46 +17685,31 @@ def 
to_map(self): return _map result = dict() - if self.account_types is not None: - result['AccountTypes'] = self.account_types - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.user_ids is not None: - result['UserIds'] = self.user_ids - if self.user_name is not None: - result['UserName'] = self.user_name + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AccountTypes') is not None: - self.account_types = m.get('AccountTypes') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('UserIds') is not None: - self.user_ids = m.get('UserIds') - if m.get('UserName') is not None: - self.user_name = m.get('UserName') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') return self -class ListUsersResponseBodyUsers(TeaModel): +class PublishDatasetResponse(TeaModel): def __init__( self, - display_name: str = None, - user_id: str = None, - user_name: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: PublishDatasetResponseBody = None, ): - self.display_name = display_name - self.user_id = user_id - self.user_name = user_name + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - pass + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -10581,41 +17717,37 @@ def to_map(self): return _map result = dict() - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.user_id is not None: - result['UserId'] = self.user_id - if self.user_name is not None: - result['UserName'] = self.user_name + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = 
self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('UserName') is not None: - self.user_name = m.get('UserName') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = PublishDatasetResponseBody() + self.body = temp_model.from_map(m['body']) return self -class ListUsersResponseBody(TeaModel): +class PublishImageResponseBody(TeaModel): def __init__( self, + image_id: str = None, request_id: str = None, - total_count: int = None, - users: List[ListUsersResponseBodyUsers] = None, ): + self.image_id = image_id self.request_id = request_id - self.total_count = total_count - self.users = users def validate(self): - if self.users: - for k in self.users: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -10623,45 +17755,33 @@ def to_map(self): return _map result = dict() + if self.image_id is not None: + result['ImageId'] = self.image_id if self.request_id is not None: result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count - result['Users'] = [] - if self.users is not None: - for k in self.users: - result['Users'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') - self.users = [] - if m.get('Users') is not None: - for k in m.get('Users'): - temp_model = ListUsersResponseBodyUsers() - self.users.append(temp_model.from_map(k)) return 
self -class ListUsersResponse(TeaModel): +class PublishImageResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListUsersResponseBody = None, + body: PublishImageResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -10686,22 +17806,40 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListUsersResponseBody() + temp_model = PublishImageResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListWorkspaceUsersResponseBodyUsers(TeaModel): +class RegisterLineageRequest(TeaModel): def __init__( self, - user_id: str = None, - user_name: str = None, + attributes: Dict[str, Any] = None, + input_entities: List[LineageEntity] = None, + name: str = None, + output_entities: List[LineageEntity] = None, + qualified_name: str = None, + register_task_as_entity: bool = None, ): - self.user_id = user_id - self.user_name = user_name + self.attributes = attributes + # This parameter is required. + self.input_entities = input_entities + self.name = name + # This parameter is required. + self.output_entities = output_entities + # This parameter is required. 
+ self.qualified_name = qualified_name + self.register_task_as_entity = register_task_as_entity def validate(self): - pass + if self.input_entities: + for k in self.input_entities: + if k: + k.validate() + if self.output_entities: + for k in self.output_entities: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -10709,37 +17847,69 @@ def to_map(self): return _map result = dict() - if self.user_id is not None: - result['UserId'] = self.user_id - if self.user_name is not None: - result['UserName'] = self.user_name + if self.attributes is not None: + result['Attributes'] = self.attributes + result['InputEntities'] = [] + if self.input_entities is not None: + for k in self.input_entities: + result['InputEntities'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + result['OutputEntities'] = [] + if self.output_entities is not None: + for k in self.output_entities: + result['OutputEntities'].append(k.to_map() if k else None) + if self.qualified_name is not None: + result['QualifiedName'] = self.qualified_name + if self.register_task_as_entity is not None: + result['RegisterTaskAsEntity'] = self.register_task_as_entity return result def from_map(self, m: dict = None): m = m or dict() - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('UserName') is not None: - self.user_name = m.get('UserName') + if m.get('Attributes') is not None: + self.attributes = m.get('Attributes') + self.input_entities = [] + if m.get('InputEntities') is not None: + for k in m.get('InputEntities'): + temp_model = LineageEntity() + self.input_entities.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + self.output_entities = [] + if m.get('OutputEntities') is not None: + for k in m.get('OutputEntities'): + temp_model = LineageEntity() + self.output_entities.append(temp_model.from_map(k)) + if m.get('QualifiedName') is not None: + self.qualified_name = 
m.get('QualifiedName') + if m.get('RegisterTaskAsEntity') is not None: + self.register_task_as_entity = m.get('RegisterTaskAsEntity') return self -class ListWorkspaceUsersResponseBody(TeaModel): +class RegisterLineageResponseBody(TeaModel): def __init__( self, + all_success: bool = None, + entity_map: Dict[str, Any] = None, + relations: List[Relation] = None, + relationship: Relationship = None, request_id: str = None, - total_count: int = None, - users: List[ListWorkspaceUsersResponseBodyUsers] = None, ): + self.all_success = all_success + self.entity_map = entity_map + self.relations = relations + self.relationship = relationship self.request_id = request_id - self.total_count = total_count - self.users = users def validate(self): - if self.users: - for k in self.users: + if self.relations: + for k in self.relations: if k: k.validate() + if self.relationship: + self.relationship.validate() def to_map(self): _map = super().to_map() @@ -10747,45 +17917,51 @@ def to_map(self): return _map result = dict() + if self.all_success is not None: + result['AllSuccess'] = self.all_success + if self.entity_map is not None: + result['EntityMap'] = self.entity_map + result['Relations'] = [] + if self.relations is not None: + for k in self.relations: + result['Relations'].append(k.to_map() if k else None) + if self.relationship is not None: + result['Relationship'] = self.relationship.to_map() if self.request_id is not None: result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count - result['Users'] = [] - if self.users is not None: - for k in self.users: - result['Users'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() + if m.get('AllSuccess') is not None: + self.all_success = m.get('AllSuccess') + if m.get('EntityMap') is not None: + self.entity_map = m.get('EntityMap') + self.relations = [] + if m.get('Relations') is not None: + for k in m.get('Relations'): + 
temp_model = Relation() + self.relations.append(temp_model.from_map(k)) + if m.get('Relationship') is not None: + temp_model = Relationship() + self.relationship = temp_model.from_map(m['Relationship']) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') - self.users = [] - if m.get('Users') is not None: - for k in m.get('Users'): - temp_model = ListWorkspaceUsersResponseBodyUsers() - self.users.append(temp_model.from_map(k)) return self -class ListWorkspaceUsersResponse(TeaModel): +class RegisterLineageResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListWorkspaceUsersResponseBody = None, + body: RegisterLineageResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -10810,37 +17986,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListWorkspaceUsersResponseBody() + temp_model = RegisterLineageResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListWorkspacesRequest(TeaModel): +class RemoveImageResponseBody(TeaModel): def __init__( self, - fields: str = None, - module_list: str = None, - option: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, - sort_by: str = None, - status: str = None, - verbose: bool = None, - workspace_ids: str = None, - workspace_name: str = None, + request_id: str = None, ): - self.fields = fields - self.module_list = module_list - self.option = option - self.order = order - self.page_number = page_number - self.page_size = page_size - self.sort_by = sort_by - self.status = 
status - self.verbose = verbose - self.workspace_ids = workspace_ids - self.workspace_name = workspace_name + self.request_id = request_id def validate(self): pass @@ -10851,86 +18007,31 @@ def to_map(self): return _map result = dict() - if self.fields is not None: - result['Fields'] = self.fields - if self.module_list is not None: - result['ModuleList'] = self.module_list - if self.option is not None: - result['Option'] = self.option - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.status is not None: - result['Status'] = self.status - if self.verbose is not None: - result['Verbose'] = self.verbose - if self.workspace_ids is not None: - result['WorkspaceIds'] = self.workspace_ids - if self.workspace_name is not None: - result['WorkspaceName'] = self.workspace_name + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Fields') is not None: - self.fields = m.get('Fields') - if m.get('ModuleList') is not None: - self.module_list = m.get('ModuleList') - if m.get('Option') is not None: - self.option = m.get('Option') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('Verbose') is not None: - self.verbose = m.get('Verbose') - if m.get('WorkspaceIds') is not None: - self.workspace_ids = m.get('WorkspaceIds') - if m.get('WorkspaceName') is not None: - self.workspace_name = m.get('WorkspaceName') + if m.get('RequestId') is not None: + 
self.request_id = m.get('RequestId') return self -class ListWorkspacesResponseBodyWorkspaces(TeaModel): +class RemoveImageResponse(TeaModel): def __init__( - self, - admin_names: List[str] = None, - creator: str = None, - description: str = None, - env_types: List[str] = None, - extra_infos: Dict[str, Any] = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - is_default: bool = None, - status: str = None, - workspace_id: str = None, - workspace_name: str = None, + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: RemoveImageResponseBody = None, ): - self.admin_names = admin_names - self.creator = creator - self.description = description - self.env_types = env_types - self.extra_infos = extra_infos - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.is_default = is_default - self.status = status - self.workspace_id = workspace_id - self.workspace_name = workspace_name + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - pass + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -10938,75 +18039,35 @@ def to_map(self): return _map result = dict() - if self.admin_names is not None: - result['AdminNames'] = self.admin_names - if self.creator is not None: - result['Creator'] = self.creator - if self.description is not None: - result['Description'] = self.description - if self.env_types is not None: - result['EnvTypes'] = self.env_types - if self.extra_infos is not None: - result['ExtraInfos'] = self.extra_infos - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.is_default is not None: - result['IsDefault'] = self.is_default - if self.status is not None: - result['Status'] = self.status - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id - if 
self.workspace_name is not None: - result['WorkspaceName'] = self.workspace_name + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AdminNames') is not None: - self.admin_names = m.get('AdminNames') - if m.get('Creator') is not None: - self.creator = m.get('Creator') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('EnvTypes') is not None: - self.env_types = m.get('EnvTypes') - if m.get('ExtraInfos') is not None: - self.extra_infos = m.get('ExtraInfos') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('IsDefault') is not None: - self.is_default = m.get('IsDefault') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - if m.get('WorkspaceName') is not None: - self.workspace_name = m.get('WorkspaceName') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = RemoveImageResponseBody() + self.body = temp_model.from_map(m['body']) return self -class ListWorkspacesResponseBody(TeaModel): +class RemoveImageLabelsResponseBody(TeaModel): def __init__( self, request_id: str = None, - resource_limits: Dict[str, Any] = None, - total_count: int = None, - workspaces: List[ListWorkspacesResponseBodyWorkspaces] = None, ): self.request_id = request_id - self.resource_limits = resource_limits - self.total_count = total_count - self.workspaces = workspaces def validate(self): - if self.workspaces: - for k in self.workspaces: - if k: - 
k.validate() + pass def to_map(self): _map = super().to_map() @@ -11016,47 +18077,27 @@ def to_map(self): result = dict() if self.request_id is not None: result['RequestId'] = self.request_id - if self.resource_limits is not None: - result['ResourceLimits'] = self.resource_limits - if self.total_count is not None: - result['TotalCount'] = self.total_count - result['Workspaces'] = [] - if self.workspaces is not None: - for k in self.workspaces: - result['Workspaces'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('ResourceLimits') is not None: - self.resource_limits = m.get('ResourceLimits') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') - self.workspaces = [] - if m.get('Workspaces') is not None: - for k in m.get('Workspaces'): - temp_model = ListWorkspacesResponseBodyWorkspaces() - self.workspaces.append(temp_model.from_map(k)) return self -class ListWorkspacesResponse(TeaModel): +class RemoveImageLabelsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListWorkspacesResponseBody = None, + body: RemoveImageLabelsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11081,25 +18122,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListWorkspacesResponseBody() + temp_model = RemoveImageLabelsResponseBody() self.body = temp_model.from_map(m['body']) return self -class MigrateDatasetsRequest(TeaModel): +class RemoveMemberRoleResponseBody(TeaModel): def __init__( self, - count: int = None, - 
dataset_id: str = None, - if_force: bool = None, - owner_id: str = None, - workspace_id: str = None, + request_id: str = None, ): - self.count = count - self.dataset_id = dataset_id - self.if_force = if_force - self.owner_id = owner_id - self.workspace_id = workspace_id + self.request_id = request_id def validate(self): pass @@ -11110,47 +18143,64 @@ def to_map(self): return _map result = dict() - if self.count is not None: - result['Count'] = self.count - if self.dataset_id is not None: - result['DatasetId'] = self.dataset_id - if self.if_force is not None: - result['IfForce'] = self.if_force - if self.owner_id is not None: - result['OwnerId'] = self.owner_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Count') is not None: - self.count = m.get('Count') - if m.get('DatasetId') is not None: - self.dataset_id = m.get('DatasetId') - if m.get('IfForce') is not None: - self.if_force = m.get('IfForce') - if m.get('OwnerId') is not None: - self.owner_id = m.get('OwnerId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') return self -class MigrateDatasetsResponseBody(TeaModel): +class RemoveMemberRoleResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: RemoveMemberRoleResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = 
self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = RemoveMemberRoleResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class RemoveWorkspaceQuotaResponseBody(TeaModel): def __init__( self, - failed_count: int = None, - migrated_count: int = None, request_id: str = None, - successful_count: int = None, - total_count: int = None, ): - self.failed_count = failed_count - self.migrated_count = migrated_count self.request_id = request_id - self.successful_count = successful_count - self.total_count = total_count def validate(self): pass @@ -11161,48 +18211,29 @@ def to_map(self): return _map result = dict() - if self.failed_count is not None: - result['FailedCount'] = self.failed_count - if self.migrated_count is not None: - result['MigratedCount'] = self.migrated_count if self.request_id is not None: result['RequestId'] = self.request_id - if self.successful_count is not None: - result['SuccessfulCount'] = self.successful_count - if self.total_count is not None: - result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('FailedCount') is not None: - self.failed_count = m.get('FailedCount') - if m.get('MigratedCount') is not None: - self.migrated_count = m.get('MigratedCount') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('SuccessfulCount') is not None: - self.successful_count = m.get('SuccessfulCount') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') return self -class MigrateDatasetsResponse(TeaModel): +class RemoveWorkspaceQuotaResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: MigrateDatasetsResponseBody = None, + body: 
RemoveWorkspaceQuotaResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11227,18 +18258,51 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = MigrateDatasetsResponseBody() + temp_model = RemoveWorkspaceQuotaResponseBody() self.body = temp_model.from_map(m['body']) return self -class PublishCodeSourceResponseBody(TeaModel): +class SetExperimentLabelsRequest(TeaModel): + def __init__( + self, + labels: List[LabelInfo] = None, + ): + self.labels = labels + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = LabelInfo() + self.labels.append(temp_model.from_map(k)) + return self + + +class SetExperimentLabelsResponseBody(TeaModel): def __init__( self, - code_source_id: str = None, request_id: str = None, ): - self.code_source_id = code_source_id self.request_id = request_id def validate(self): @@ -11250,36 +18314,29 @@ def to_map(self): return _map result = dict() - if self.code_source_id is not None: - result['CodeSourceId'] = self.code_source_id if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CodeSourceId') is not None: - self.code_source_id = m.get('CodeSourceId') if 
m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class PublishCodeSourceResponse(TeaModel): +class SetExperimentLabelsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: PublishCodeSourceResponseBody = None, + body: SetExperimentLabelsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11304,12 +18361,47 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = PublishCodeSourceResponseBody() + temp_model = SetExperimentLabelsResponseBody() self.body = temp_model.from_map(m['body']) return self -class PublishDatasetResponseBody(TeaModel): +class SetTrialLabelsRequest(TeaModel): + def __init__( + self, + labels: List[LabelInfo] = None, + ): + self.labels = labels + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = LabelInfo() + self.labels.append(temp_model.from_map(k)) + return self + + +class SetTrialLabelsResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -11336,21 +18428,18 @@ def from_map(self, m: dict = None): return self -class PublishDatasetResponse(TeaModel): +class SetTrialLabelsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = 
None, - body: PublishDatasetResponseBody = None, + body: SetTrialLabelsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11375,19 +18464,21 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = PublishDatasetResponseBody() + temp_model = SetTrialLabelsResponseBody() self.body = temp_model.from_map(m['body']) return self -class PublishImageResponseBody(TeaModel): +class SetUserConfigsRequestConfigs(TeaModel): def __init__( self, - image_id: str = None, - request_id: str = None, + category_name: str = None, + config_key: str = None, + config_value: str = None, ): - self.image_id = image_id - self.request_id = request_id + self.category_name = category_name + self.config_key = config_key + self.config_value = config_value def validate(self): pass @@ -11398,38 +18489,37 @@ def to_map(self): return _map result = dict() - if self.image_id is not None: - result['ImageId'] = self.image_id - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.category_name is not None: + result['CategoryName'] = self.category_name + if self.config_key is not None: + result['ConfigKey'] = self.config_key + if self.config_value is not None: + result['ConfigValue'] = self.config_value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ImageId') is not None: - self.image_id = m.get('ImageId') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('CategoryName') is not None: + self.category_name = m.get('CategoryName') + if m.get('ConfigKey') is not None: + self.config_key = m.get('ConfigKey') + if m.get('ConfigValue') is not None: + self.config_value = 
m.get('ConfigValue') return self -class PublishImageResponse(TeaModel): +class SetUserConfigsRequest(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: PublishImageResponseBody = None, + configs: List[SetUserConfigsRequestConfigs] = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.configs = configs def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.configs: + for k in self.configs: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -11437,27 +18527,23 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + result['Configs'] = [] + if self.configs is not None: + for k in self.configs: + result['Configs'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = PublishImageResponseBody() - self.body = temp_model.from_map(m['body']) + self.configs = [] + if m.get('Configs') is not None: + for k in m.get('Configs'): + temp_model = SetUserConfigsRequestConfigs() + self.configs.append(temp_model.from_map(k)) return self -class RemoveImageResponseBody(TeaModel): +class SetUserConfigsResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -11484,21 +18570,18 @@ def from_map(self, m: dict = None): return self -class RemoveImageResponse(TeaModel): +class SetUserConfigsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: 
int = None, - body: RemoveImageResponseBody = None, + body: SetUserConfigsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11523,12 +18606,12 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = RemoveImageResponseBody() + temp_model = SetUserConfigsResponseBody() self.body = temp_model.from_map(m['body']) return self -class RemoveImageLabelsResponseBody(TeaModel): +class SyncUsersResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -11555,21 +18638,18 @@ def from_map(self, m: dict = None): return self -class RemoveImageLabelsResponse(TeaModel): +class SyncUsersResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: RemoveImageLabelsResponseBody = None, + body: SyncUsersResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11594,17 +18674,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = RemoveImageLabelsResponseBody() + temp_model = SyncUsersResponseBody() self.body = temp_model.from_map(m['body']) return self -class RemoveMemberRoleResponseBody(TeaModel): +class UpdateConfigsRequestConfigsLabels(TeaModel): def __init__( self, - request_id: str = None, + key: str = None, + value: str = None, ): - self.request_id = request_id + self.key = key + self.value = value def validate(self): 
pass @@ -11615,34 +18697,37 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class RemoveMemberRoleResponse(TeaModel): +class UpdateConfigsRequestConfigs(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: RemoveMemberRoleResponseBody = None, + config_key: str = None, + config_value: str = None, + labels: List[UpdateConfigsRequestConfigsLabels] = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.config_key = config_key + self.config_value = config_value + self.labels = labels def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -11650,27 +18735,66 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.config_key is not None: + result['ConfigKey'] = self.config_key + if self.config_value is not None: + result['ConfigValue'] = self.config_value + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if 
m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = RemoveMemberRoleResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('ConfigKey') is not None: + self.config_key = m.get('ConfigKey') + if m.get('ConfigValue') is not None: + self.config_value = m.get('ConfigValue') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = UpdateConfigsRequestConfigsLabels() + self.labels.append(temp_model.from_map(k)) return self -class RemoveWorkspaceQuotaResponseBody(TeaModel): +class UpdateConfigsRequest(TeaModel): + def __init__( + self, + configs: List[UpdateConfigsRequestConfigs] = None, + ): + self.configs = configs + + def validate(self): + if self.configs: + for k in self.configs: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Configs'] = [] + if self.configs is not None: + for k in self.configs: + result['Configs'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.configs = [] + if m.get('Configs') is not None: + for k in m.get('Configs'): + temp_model = UpdateConfigsRequestConfigs() + self.configs.append(temp_model.from_map(k)) + return self + + +class UpdateConfigsResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -11697,21 +18821,18 @@ def from_map(self, m: dict = None): return self -class RemoveWorkspaceQuotaResponse(TeaModel): +class UpdateConfigsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: RemoveWorkspaceQuotaResponseBody = None, + body: UpdateConfigsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - 
self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11736,12 +18857,51 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = RemoveWorkspaceQuotaResponseBody() + temp_model = UpdateConfigsResponseBody() self.body = temp_model.from_map(m['body']) return self -class SyncUsersResponseBody(TeaModel): +class UpdateDatasetRequest(TeaModel): + def __init__( + self, + description: str = None, + name: str = None, + options: str = None, + ): + self.description = description + self.name = name + self.options = options + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.description is not None: + result['Description'] = self.description + if self.name is not None: + result['Name'] = self.name + if self.options is not None: + result['Options'] = self.options + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Options') is not None: + self.options = m.get('Options') + return self + + +class UpdateDatasetResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -11768,21 +18928,18 @@ def from_map(self, m: dict = None): return self -class SyncUsersResponse(TeaModel): +class UpdateDatasetResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: SyncUsersResponseBody = None, + body: UpdateDatasetResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: 
self.body.validate() @@ -11807,19 +18964,23 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = SyncUsersResponseBody() + temp_model = UpdateDatasetResponseBody() self.body = temp_model.from_map(m['body']) return self -class UpdateConfigsRequestConfigs(TeaModel): +class UpdateDatasetVersionRequest(TeaModel): def __init__( self, - config_key: str = None, - config_value: str = None, + data_count: int = None, + data_size: int = None, + description: str = None, + options: str = None, ): - self.config_key = config_key - self.config_value = config_value + self.data_count = data_count + self.data_size = data_size + self.description = description + self.options = options def validate(self): pass @@ -11830,57 +18991,30 @@ def to_map(self): return _map result = dict() - if self.config_key is not None: - result['ConfigKey'] = self.config_key - if self.config_value is not None: - result['ConfigValue'] = self.config_value - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('ConfigKey') is not None: - self.config_key = m.get('ConfigKey') - if m.get('ConfigValue') is not None: - self.config_value = m.get('ConfigValue') - return self - - -class UpdateConfigsRequest(TeaModel): - def __init__( - self, - configs: List[UpdateConfigsRequestConfigs] = None, - ): - self.configs = configs - - def validate(self): - if self.configs: - for k in self.configs: - if k: - k.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - result['Configs'] = [] - if self.configs is not None: - for k in self.configs: - result['Configs'].append(k.to_map() if k else None) + if self.data_count is not None: + result['DataCount'] = self.data_count + if self.data_size is not None: + result['DataSize'] = self.data_size + if self.description is not None: + result['Description'] = self.description + if self.options 
is not None: + result['Options'] = self.options return result def from_map(self, m: dict = None): m = m or dict() - self.configs = [] - if m.get('Configs') is not None: - for k in m.get('Configs'): - temp_model = UpdateConfigsRequestConfigs() - self.configs.append(temp_model.from_map(k)) + if m.get('DataCount') is not None: + self.data_count = m.get('DataCount') + if m.get('DataSize') is not None: + self.data_size = m.get('DataSize') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('Options') is not None: + self.options = m.get('Options') return self -class UpdateConfigsResponseBody(TeaModel): +class UpdateDatasetVersionResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -11907,21 +19041,18 @@ def from_map(self, m: dict = None): return self -class UpdateConfigsResponse(TeaModel): +class UpdateDatasetVersionResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: UpdateConfigsResponseBody = None, + body: UpdateDatasetVersionResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11946,21 +19077,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = UpdateConfigsResponseBody() + temp_model = UpdateDatasetVersionResponseBody() self.body = temp_model.from_map(m['body']) return self -class UpdateDatasetRequest(TeaModel): +class UpdateDefaultWorkspaceRequest(TeaModel): def __init__( self, - description: str = None, - name: str = None, - options: str = None, + workspace_id: str = None, ): - self.description = description - self.name = name - self.options = options + self.workspace_id = workspace_id def 
validate(self): pass @@ -11971,26 +19098,18 @@ def to_map(self): return _map result = dict() - if self.description is not None: - result['Description'] = self.description - if self.name is not None: - result['Name'] = self.name - if self.options is not None: - result['Options'] = self.options + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Options') is not None: - self.options = m.get('Options') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class UpdateDatasetResponseBody(TeaModel): +class UpdateDefaultWorkspaceResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -12017,21 +19136,18 @@ def from_map(self, m: dict = None): return self -class UpdateDatasetResponse(TeaModel): +class UpdateDefaultWorkspaceResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: UpdateDatasetResponseBody = None, + body: UpdateDefaultWorkspaceResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12056,17 +19172,20 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = UpdateDatasetResponseBody() + temp_model = UpdateDefaultWorkspaceResponseBody() self.body = temp_model.from_map(m['body']) return self -class UpdateDefaultWorkspaceRequest(TeaModel): +class UpdateExperimentRequest(TeaModel): def __init__( self, - workspace_id: str = None, + accessibility: str = None, + name: str = 
None, ): - self.workspace_id = workspace_id + self.accessibility = accessibility + # 名称 + self.name = name def validate(self): pass @@ -12077,18 +19196,22 @@ def to_map(self): return _map result = dict() - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.name is not None: + result['Name'] = self.name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('Name') is not None: + self.name = m.get('Name') return self -class UpdateDefaultWorkspaceResponseBody(TeaModel): +class UpdateExperimentResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -12115,21 +19238,18 @@ def from_map(self, m: dict = None): return self -class UpdateDefaultWorkspaceResponse(TeaModel): +class UpdateExperimentResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: UpdateDefaultWorkspaceResponseBody = None, + body: UpdateExperimentResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12154,7 +19274,7 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = UpdateDefaultWorkspaceResponseBody() + temp_model = UpdateExperimentResponseBody() self.body = temp_model.from_map(m['body']) return self @@ -12164,17 +19284,23 @@ def __init__( self, accessibility: str = None, domain: str = None, + extra_info: Dict[str, Any] = None, model_description: str = None, model_doc: str = None, 
model_name: str = None, + model_type: str = None, + order_number: int = None, origin: str = None, task: str = None, ): self.accessibility = accessibility self.domain = domain + self.extra_info = extra_info self.model_description = model_description self.model_doc = model_doc self.model_name = model_name + self.model_type = model_type + self.order_number = order_number self.origin = origin self.task = task @@ -12191,12 +19317,18 @@ def to_map(self): result['Accessibility'] = self.accessibility if self.domain is not None: result['Domain'] = self.domain + if self.extra_info is not None: + result['ExtraInfo'] = self.extra_info if self.model_description is not None: result['ModelDescription'] = self.model_description if self.model_doc is not None: result['ModelDoc'] = self.model_doc if self.model_name is not None: result['ModelName'] = self.model_name + if self.model_type is not None: + result['ModelType'] = self.model_type + if self.order_number is not None: + result['OrderNumber'] = self.order_number if self.origin is not None: result['Origin'] = self.origin if self.task is not None: @@ -12209,12 +19341,18 @@ def from_map(self, m: dict = None): self.accessibility = m.get('Accessibility') if m.get('Domain') is not None: self.domain = m.get('Domain') + if m.get('ExtraInfo') is not None: + self.extra_info = m.get('ExtraInfo') if m.get('ModelDescription') is not None: self.model_description = m.get('ModelDescription') if m.get('ModelDoc') is not None: self.model_doc = m.get('ModelDoc') if m.get('ModelName') is not None: self.model_name = m.get('ModelName') + if m.get('ModelType') is not None: + self.model_type = m.get('ModelType') + if m.get('OrderNumber') is not None: + self.order_number = m.get('OrderNumber') if m.get('Origin') is not None: self.origin = m.get('Origin') if m.get('Task') is not None: @@ -12261,9 +19399,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 
'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12471,9 +19606,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12507,6 +19639,9 @@ class UpdateModelVersionRequest(TeaModel): def __init__( self, approval_status: str = None, + compression_spec: Dict[str, Any] = None, + evaluation_spec: Dict[str, Any] = None, + extra_info: Dict[str, Any] = None, inference_spec: Dict[str, Any] = None, metrics: Dict[str, Any] = None, options: str = None, @@ -12516,6 +19651,9 @@ def __init__( version_description: str = None, ): self.approval_status = approval_status + self.compression_spec = compression_spec + self.evaluation_spec = evaluation_spec + self.extra_info = extra_info self.inference_spec = inference_spec self.metrics = metrics self.options = options @@ -12535,6 +19673,12 @@ def to_map(self): result = dict() if self.approval_status is not None: result['ApprovalStatus'] = self.approval_status + if self.compression_spec is not None: + result['CompressionSpec'] = self.compression_spec + if self.evaluation_spec is not None: + result['EvaluationSpec'] = self.evaluation_spec + if self.extra_info is not None: + result['ExtraInfo'] = self.extra_info if self.inference_spec is not None: result['InferenceSpec'] = self.inference_spec if self.metrics is not None: @@ -12555,24 +19699,149 @@ def from_map(self, m: dict = None): m = m or dict() if m.get('ApprovalStatus') is not None: self.approval_status = m.get('ApprovalStatus') + if m.get('CompressionSpec') is not None: + self.compression_spec = m.get('CompressionSpec') + if m.get('EvaluationSpec') is not None: + self.evaluation_spec = m.get('EvaluationSpec') + if m.get('ExtraInfo') is not None: + self.extra_info = m.get('ExtraInfo') + if m.get('InferenceSpec') is not None: + self.inference_spec = 
m.get('InferenceSpec') + if m.get('Metrics') is not None: + self.metrics = m.get('Metrics') + if m.get('Options') is not None: + self.options = m.get('Options') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('TrainingSpec') is not None: + self.training_spec = m.get('TrainingSpec') + if m.get('VersionDescription') is not None: + self.version_description = m.get('VersionDescription') + return self + + +class UpdateModelVersionResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class UpdateModelVersionResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: UpdateModelVersionResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = UpdateModelVersionResponseBody() + self.body = 
temp_model.from_map(m['body']) + return self + + +class UpdateServiceTemplateRequest(TeaModel): + def __init__( + self, + inference_spec: Dict[str, Any] = None, + order_number: int = None, + service_template_description: str = None, + service_template_doc: str = None, + service_template_name: str = None, + ): + self.inference_spec = inference_spec + self.order_number = order_number + self.service_template_description = service_template_description + self.service_template_doc = service_template_doc + self.service_template_name = service_template_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + if self.order_number is not None: + result['OrderNumber'] = self.order_number + if self.service_template_description is not None: + result['ServiceTemplateDescription'] = self.service_template_description + if self.service_template_doc is not None: + result['ServiceTemplateDoc'] = self.service_template_doc + if self.service_template_name is not None: + result['ServiceTemplateName'] = self.service_template_name + return result + + def from_map(self, m: dict = None): + m = m or dict() if m.get('InferenceSpec') is not None: self.inference_spec = m.get('InferenceSpec') - if m.get('Metrics') is not None: - self.metrics = m.get('Metrics') - if m.get('Options') is not None: - self.options = m.get('Options') - if m.get('SourceId') is not None: - self.source_id = m.get('SourceId') - if m.get('SourceType') is not None: - self.source_type = m.get('SourceType') - if m.get('TrainingSpec') is not None: - self.training_spec = m.get('TrainingSpec') - if m.get('VersionDescription') is not None: - self.version_description = m.get('VersionDescription') + if m.get('OrderNumber') is not None: + self.order_number = m.get('OrderNumber') + if m.get('ServiceTemplateDescription') is not None: + 
self.service_template_description = m.get('ServiceTemplateDescription') + if m.get('ServiceTemplateDoc') is not None: + self.service_template_doc = m.get('ServiceTemplateDoc') + if m.get('ServiceTemplateName') is not None: + self.service_template_name = m.get('ServiceTemplateName') return self -class UpdateModelVersionResponseBody(TeaModel): +class UpdateServiceTemplateResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -12599,21 +19868,18 @@ def from_map(self, m: dict = None): return self -class UpdateModelVersionResponse(TeaModel): +class UpdateServiceTemplateResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: UpdateModelVersionResponseBody = None, + body: UpdateServiceTemplateResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12638,7 +19904,7 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = UpdateModelVersionResponseBody() + temp_model = UpdateServiceTemplateResponseBody() self.body = temp_model.from_map(m['body']) return self @@ -12715,9 +19981,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12747,21 +20010,63 @@ def from_map(self, m: dict = None): return self +class UpdateWorkspaceResourceRequestLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + 
result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + class UpdateWorkspaceResourceRequest(TeaModel): def __init__( self, group_name: str = None, is_default: bool = None, + labels: List[UpdateWorkspaceResourceRequestLabels] = None, product_type: str = None, + resource_ids: List[str] = None, resource_type: str = None, + spec: Dict[str, Any] = None, ): self.group_name = group_name self.is_default = is_default + self.labels = labels self.product_type = product_type + self.resource_ids = resource_ids self.resource_type = resource_type + self.spec = spec def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -12773,10 +20078,18 @@ def to_map(self): result['GroupName'] = self.group_name if self.is_default is not None: result['IsDefault'] = self.is_default + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) if self.product_type is not None: result['ProductType'] = self.product_type + if self.resource_ids is not None: + result['ResourceIds'] = self.resource_ids if self.resource_type is not None: result['ResourceType'] = self.resource_type + if self.spec is not None: + result['Spec'] = self.spec return result def from_map(self, m: dict = None): @@ -12785,10 +20098,19 @@ def from_map(self, m: dict = None): self.group_name = m.get('GroupName') if m.get('IsDefault') is not None: self.is_default = m.get('IsDefault') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = UpdateWorkspaceResourceRequestLabels() + self.labels.append(temp_model.from_map(k)) if m.get('ProductType') is not None: 
self.product_type = m.get('ProductType') + if m.get('ResourceIds') is not None: + self.resource_ids = m.get('ResourceIds') if m.get('ResourceType') is not None: self.resource_type = m.get('ResourceType') + if m.get('Spec') is not None: + self.spec = m.get('Spec') return self @@ -12796,8 +20118,10 @@ class UpdateWorkspaceResourceResponseBody(TeaModel): def __init__( self, request_id: str = None, + resource_ids: List[str] = None, ): self.request_id = request_id + self.resource_ids = resource_ids def validate(self): pass @@ -12810,12 +20134,16 @@ def to_map(self): result = dict() if self.request_id is not None: result['RequestId'] = self.request_id + if self.resource_ids is not None: + result['ResourceIds'] = self.resource_ids return result def from_map(self, m: dict = None): m = m or dict() if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('ResourceIds') is not None: + self.resource_ids = m.get('ResourceIds') return self @@ -12831,9 +20159,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12863,3 +20188,237 @@ def from_map(self, m: dict = None): return self +class UpdateWorkspaceRoleRequestModulePermissionsPermissionsPermissionRules(TeaModel): + def __init__( + self, + accessibility: str = None, + entity_access_type: str = None, + ): + self.accessibility = accessibility + self.entity_access_type = entity_access_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.entity_access_type is not None: + result['EntityAccessType'] = self.entity_access_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + 
self.accessibility = m.get('Accessibility') + if m.get('EntityAccessType') is not None: + self.entity_access_type = m.get('EntityAccessType') + return self + + +class UpdateWorkspaceRoleRequestModulePermissionsPermissions(TeaModel): + def __init__( + self, + permission_codes: List[str] = None, + permission_rules: List[UpdateWorkspaceRoleRequestModulePermissionsPermissionsPermissionRules] = None, + ): + self.permission_codes = permission_codes + self.permission_rules = permission_rules + + def validate(self): + if self.permission_rules: + for k in self.permission_rules: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.permission_codes is not None: + result['PermissionCodes'] = self.permission_codes + result['PermissionRules'] = [] + if self.permission_rules is not None: + for k in self.permission_rules: + result['PermissionRules'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('PermissionCodes') is not None: + self.permission_codes = m.get('PermissionCodes') + self.permission_rules = [] + if m.get('PermissionRules') is not None: + for k in m.get('PermissionRules'): + temp_model = UpdateWorkspaceRoleRequestModulePermissionsPermissionsPermissionRules() + self.permission_rules.append(temp_model.from_map(k)) + return self + + +class UpdateWorkspaceRoleRequestModulePermissions(TeaModel): + def __init__( + self, + module_name: str = None, + permission_type: str = None, + permissions: List[UpdateWorkspaceRoleRequestModulePermissionsPermissions] = None, + ): + self.module_name = module_name + self.permission_type = permission_type + self.permissions = permissions + + def validate(self): + if self.permissions: + for k in self.permissions: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.module_name is not None: + 
result['ModuleName'] = self.module_name + if self.permission_type is not None: + result['PermissionType'] = self.permission_type + result['Permissions'] = [] + if self.permissions is not None: + for k in self.permissions: + result['Permissions'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ModuleName') is not None: + self.module_name = m.get('ModuleName') + if m.get('PermissionType') is not None: + self.permission_type = m.get('PermissionType') + self.permissions = [] + if m.get('Permissions') is not None: + for k in m.get('Permissions'): + temp_model = UpdateWorkspaceRoleRequestModulePermissionsPermissions() + self.permissions.append(temp_model.from_map(k)) + return self + + +class UpdateWorkspaceRoleRequest(TeaModel): + def __init__( + self, + module_permissions: List[UpdateWorkspaceRoleRequestModulePermissions] = None, + role_name: str = None, + ): + self.module_permissions = module_permissions + self.role_name = role_name + + def validate(self): + if self.module_permissions: + for k in self.module_permissions: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['ModulePermissions'] = [] + if self.module_permissions is not None: + for k in self.module_permissions: + result['ModulePermissions'].append(k.to_map() if k else None) + if self.role_name is not None: + result['RoleName'] = self.role_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.module_permissions = [] + if m.get('ModulePermissions') is not None: + for k in m.get('ModulePermissions'): + temp_model = UpdateWorkspaceRoleRequestModulePermissions() + self.module_permissions.append(temp_model.from_map(k)) + if m.get('RoleName') is not None: + self.role_name = m.get('RoleName') + return self + + +class UpdateWorkspaceRoleResponseBody(TeaModel): + def __init__( + self, + instance_job_id: str = None, + request_id: 
str = None, + ): + self.instance_job_id = instance_job_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.instance_job_id is not None: + result['InstanceJobId'] = self.instance_job_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('InstanceJobId') is not None: + self.instance_job_id = m.get('InstanceJobId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class UpdateWorkspaceRoleResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: UpdateWorkspaceRoleResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = UpdateWorkspaceRoleResponseBody() + self.body = temp_model.from_map(m['body']) + return self diff --git a/pai/libs/alibabacloud_pai_dsw20220101/__init__.py b/pai/libs/alibabacloud_pai_dsw20220101/__init__.py new file mode 100644 index 0000000..f94ba41 --- /dev/null +++ b/pai/libs/alibabacloud_pai_dsw20220101/__init__.py @@ -0,0 +1 @@ +__version__ = '1.3.0' \ No newline at end of file diff --git a/pai/libs/alibabacloud_pai_dsw20220101/client.py 
b/pai/libs/alibabacloud_pai_dsw20220101/client.py new file mode 100644 index 0000000..9ddc7c3 --- /dev/null +++ b/pai/libs/alibabacloud_pai_dsw20220101/client.py @@ -0,0 +1,2162 @@ +# -*- coding: utf-8 -*- +# This file is auto-generated, don't edit it. Thanks. +from typing import Dict +from Tea.core import TeaCore + +from alibabacloud_tea_openapi.client import Client as OpenApiClient +from alibabacloud_tea_openapi import models as open_api_models +from alibabacloud_tea_util.client import Client as UtilClient +from alibabacloud_endpoint_util.client import Client as EndpointUtilClient +from alibabacloud_tea_util import models as util_models +from alibabacloud_openapi_util.client import Client as OpenApiUtilClient + +from pai.libs.alibabacloud_pai_dsw20220101 import models as pai_dsw_20220101_models + +class Client(OpenApiClient): + """ + *\ + """ + def __init__( + self, + config: open_api_models.Config, + ): + super().__init__(config) + self._endpoint_rule = '' + self.check_config(config) + self._endpoint = self.get_endpoint('pai-dsw', self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint) + + def get_endpoint( + self, + product_id: str, + region_id: str, + endpoint_rule: str, + network: str, + suffix: str, + endpoint_map: Dict[str, str], + endpoint: str, + ) -> str: + if not UtilClient.empty(endpoint): + return endpoint + if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(endpoint_map.get(region_id)): + return endpoint_map.get(region_id) + return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix) + + def create_idle_instance_culler_with_options( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateIdleInstanceCullerRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.CreateIdleInstanceCullerResponse: + UtilClient.validate_model(request) + body = {} + if not 
UtilClient.is_unset(request.cpu_percent_threshold): + body['CpuPercentThreshold'] = request.cpu_percent_threshold + if not UtilClient.is_unset(request.gpu_percent_threshold): + body['GpuPercentThreshold'] = request.gpu_percent_threshold + if not UtilClient.is_unset(request.max_idle_time_in_minutes): + body['MaxIdleTimeInMinutes'] = request.max_idle_time_in_minutes + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateIdleInstanceCuller', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/idleinstanceculler', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.CreateIdleInstanceCullerResponse(), + self.call_api(params, req, runtime) + ) + + async def create_idle_instance_culler_with_options_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateIdleInstanceCullerRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.CreateIdleInstanceCullerResponse: + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.cpu_percent_threshold): + body['CpuPercentThreshold'] = request.cpu_percent_threshold + if not UtilClient.is_unset(request.gpu_percent_threshold): + body['GpuPercentThreshold'] = request.gpu_percent_threshold + if not UtilClient.is_unset(request.max_idle_time_in_minutes): + body['MaxIdleTimeInMinutes'] = request.max_idle_time_in_minutes + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateIdleInstanceCuller', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/idleinstanceculler', + method='POST', + auth_type='AK', + 
style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.CreateIdleInstanceCullerResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_idle_instance_culler( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateIdleInstanceCullerRequest, + ) -> pai_dsw_20220101_models.CreateIdleInstanceCullerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_idle_instance_culler_with_options(instance_id, request, headers, runtime) + + async def create_idle_instance_culler_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateIdleInstanceCullerRequest, + ) -> pai_dsw_20220101_models.CreateIdleInstanceCullerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_idle_instance_culler_with_options_async(instance_id, request, headers, runtime) + + def create_instance_with_options( + self, + request: pai_dsw_20220101_models.CreateInstanceRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.CreateInstanceResponse: + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.accessibility): + body['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.cloud_disks): + body['CloudDisks'] = request.cloud_disks + if not UtilClient.is_unset(request.datasets): + body['Datasets'] = request.datasets + if not UtilClient.is_unset(request.driver): + body['Driver'] = request.driver + if not UtilClient.is_unset(request.ecs_spec): + body['EcsSpec'] = request.ecs_spec + if not UtilClient.is_unset(request.environment_variables): + body['EnvironmentVariables'] = request.environment_variables + if not UtilClient.is_unset(request.image_id): + body['ImageId'] = request.image_id + if not UtilClient.is_unset(request.image_url): + body['ImageUrl'] = request.image_url + if not UtilClient.is_unset(request.instance_name): + 
body['InstanceName'] = request.instance_name + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.priority): + body['Priority'] = request.priority + if not UtilClient.is_unset(request.requested_resource): + body['RequestedResource'] = request.requested_resource + if not UtilClient.is_unset(request.resource_id): + body['ResourceId'] = request.resource_id + if not UtilClient.is_unset(request.user_id): + body['UserId'] = request.user_id + if not UtilClient.is_unset(request.user_vpc): + body['UserVpc'] = request.user_vpc + if not UtilClient.is_unset(request.workspace_id): + body['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.workspace_source): + body['WorkspaceSource'] = request.workspace_source + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.CreateInstanceResponse(), + self.call_api(params, req, runtime) + ) + + async def create_instance_with_options_async( + self, + request: pai_dsw_20220101_models.CreateInstanceRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.CreateInstanceResponse: + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.accessibility): + body['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.cloud_disks): + body['CloudDisks'] = request.cloud_disks + if not UtilClient.is_unset(request.datasets): + body['Datasets'] = request.datasets + if not UtilClient.is_unset(request.driver): + body['Driver'] = request.driver + if not UtilClient.is_unset(request.ecs_spec): + body['EcsSpec'] = request.ecs_spec + if not 
UtilClient.is_unset(request.environment_variables): + body['EnvironmentVariables'] = request.environment_variables + if not UtilClient.is_unset(request.image_id): + body['ImageId'] = request.image_id + if not UtilClient.is_unset(request.image_url): + body['ImageUrl'] = request.image_url + if not UtilClient.is_unset(request.instance_name): + body['InstanceName'] = request.instance_name + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.priority): + body['Priority'] = request.priority + if not UtilClient.is_unset(request.requested_resource): + body['RequestedResource'] = request.requested_resource + if not UtilClient.is_unset(request.resource_id): + body['ResourceId'] = request.resource_id + if not UtilClient.is_unset(request.user_id): + body['UserId'] = request.user_id + if not UtilClient.is_unset(request.user_vpc): + body['UserVpc'] = request.user_vpc + if not UtilClient.is_unset(request.workspace_id): + body['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.workspace_source): + body['WorkspaceSource'] = request.workspace_source + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.CreateInstanceResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_instance( + self, + request: pai_dsw_20220101_models.CreateInstanceRequest, + ) -> pai_dsw_20220101_models.CreateInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_instance_with_options(request, headers, runtime) + + async def create_instance_async( + self, + request: pai_dsw_20220101_models.CreateInstanceRequest, + ) -> 
pai_dsw_20220101_models.CreateInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_instance_with_options_async(request, headers, runtime) + + def create_instance_shutdown_timer_with_options( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateInstanceShutdownTimerRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.CreateInstanceShutdownTimerResponse: + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.due_time): + body['DueTime'] = request.due_time + if not UtilClient.is_unset(request.remaining_time_in_ms): + body['RemainingTimeInMs'] = request.remaining_time_in_ms + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateInstanceShutdownTimer', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/shutdowntimer', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.CreateInstanceShutdownTimerResponse(), + self.call_api(params, req, runtime) + ) + + async def create_instance_shutdown_timer_with_options_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateInstanceShutdownTimerRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.CreateInstanceShutdownTimerResponse: + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.due_time): + body['DueTime'] = request.due_time + if not UtilClient.is_unset(request.remaining_time_in_ms): + body['RemainingTimeInMs'] = request.remaining_time_in_ms + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + 
action='CreateInstanceShutdownTimer', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/shutdowntimer', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.CreateInstanceShutdownTimerResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_instance_shutdown_timer( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateInstanceShutdownTimerRequest, + ) -> pai_dsw_20220101_models.CreateInstanceShutdownTimerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_instance_shutdown_timer_with_options(instance_id, request, headers, runtime) + + async def create_instance_shutdown_timer_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateInstanceShutdownTimerRequest, + ) -> pai_dsw_20220101_models.CreateInstanceShutdownTimerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_instance_shutdown_timer_with_options_async(instance_id, request, headers, runtime) + + def create_instance_snapshot_with_options( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateInstanceSnapshotRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.CreateInstanceSnapshotResponse: + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.exclude_paths): + body['ExcludePaths'] = request.exclude_paths + if not UtilClient.is_unset(request.image_url): + body['ImageUrl'] = request.image_url + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.overwrite): + body['Overwrite'] = request.overwrite + if not UtilClient.is_unset(request.snapshot_description): + body['SnapshotDescription'] = request.snapshot_description + if not 
UtilClient.is_unset(request.snapshot_name): + body['SnapshotName'] = request.snapshot_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateInstanceSnapshot', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/snapshots', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.CreateInstanceSnapshotResponse(), + self.call_api(params, req, runtime) + ) + + async def create_instance_snapshot_with_options_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateInstanceSnapshotRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.CreateInstanceSnapshotResponse: + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.exclude_paths): + body['ExcludePaths'] = request.exclude_paths + if not UtilClient.is_unset(request.image_url): + body['ImageUrl'] = request.image_url + if not UtilClient.is_unset(request.labels): + body['Labels'] = request.labels + if not UtilClient.is_unset(request.overwrite): + body['Overwrite'] = request.overwrite + if not UtilClient.is_unset(request.snapshot_description): + body['SnapshotDescription'] = request.snapshot_description + if not UtilClient.is_unset(request.snapshot_name): + body['SnapshotName'] = request.snapshot_name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateInstanceSnapshot', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/snapshots', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + 
pai_dsw_20220101_models.CreateInstanceSnapshotResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_instance_snapshot( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateInstanceSnapshotRequest, + ) -> pai_dsw_20220101_models.CreateInstanceSnapshotResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_instance_snapshot_with_options(instance_id, request, headers, runtime) + + async def create_instance_snapshot_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.CreateInstanceSnapshotRequest, + ) -> pai_dsw_20220101_models.CreateInstanceSnapshotResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_instance_snapshot_with_options_async(instance_id, request, headers, runtime) + + def delete_idle_instance_culler_with_options( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.DeleteIdleInstanceCullerResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='DeleteIdleInstanceCuller', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/idleinstanceculler', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.DeleteIdleInstanceCullerResponse(), + self.call_api(params, req, runtime) + ) + + async def delete_idle_instance_culler_with_options_async( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.DeleteIdleInstanceCullerResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='DeleteIdleInstanceCuller', + version='2022-01-01', + protocol='HTTPS', + 
pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/idleinstanceculler', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.DeleteIdleInstanceCullerResponse(), + await self.call_api_async(params, req, runtime) + ) + + def delete_idle_instance_culler( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.DeleteIdleInstanceCullerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.delete_idle_instance_culler_with_options(instance_id, headers, runtime) + + async def delete_idle_instance_culler_async( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.DeleteIdleInstanceCullerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.delete_idle_instance_culler_with_options_async(instance_id, headers, runtime) + + def delete_instance_with_options( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.DeleteInstanceResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='DeleteInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.DeleteInstanceResponse(), + self.call_api(params, req, runtime) + ) + + async def delete_instance_with_options_async( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.DeleteInstanceResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='DeleteInstance', + version='2022-01-01', + protocol='HTTPS', + 
pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.DeleteInstanceResponse(), + await self.call_api_async(params, req, runtime) + ) + + def delete_instance( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.DeleteInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.delete_instance_with_options(instance_id, headers, runtime) + + async def delete_instance_async( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.DeleteInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.delete_instance_with_options_async(instance_id, headers, runtime) + + def delete_instance_shutdown_timer_with_options( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.DeleteInstanceShutdownTimerResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='DeleteInstanceShutdownTimer', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/shutdowntimer', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.DeleteInstanceShutdownTimerResponse(), + self.call_api(params, req, runtime) + ) + + async def delete_instance_shutdown_timer_with_options_async( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.DeleteInstanceShutdownTimerResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='DeleteInstanceShutdownTimer', + version='2022-01-01', + protocol='HTTPS', + 
pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/shutdowntimer', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.DeleteInstanceShutdownTimerResponse(), + await self.call_api_async(params, req, runtime) + ) + + def delete_instance_shutdown_timer( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.DeleteInstanceShutdownTimerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.delete_instance_shutdown_timer_with_options(instance_id, headers, runtime) + + async def delete_instance_shutdown_timer_async( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.DeleteInstanceShutdownTimerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.delete_instance_shutdown_timer_with_options_async(instance_id, headers, runtime) + + def delete_instance_snapshot_with_options( + self, + instance_id: str, + snapshot_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.DeleteInstanceSnapshotResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='DeleteInstanceSnapshot', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/snapshots/{OpenApiUtilClient.get_encode_param(snapshot_id)}', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.DeleteInstanceSnapshotResponse(), + self.call_api(params, req, runtime) + ) + + async def delete_instance_snapshot_with_options_async( + self, + instance_id: str, + snapshot_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.DeleteInstanceSnapshotResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + 
params = open_api_models.Params( + action='DeleteInstanceSnapshot', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/snapshots/{OpenApiUtilClient.get_encode_param(snapshot_id)}', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.DeleteInstanceSnapshotResponse(), + await self.call_api_async(params, req, runtime) + ) + + def delete_instance_snapshot( + self, + instance_id: str, + snapshot_id: str, + ) -> pai_dsw_20220101_models.DeleteInstanceSnapshotResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.delete_instance_snapshot_with_options(instance_id, snapshot_id, headers, runtime) + + async def delete_instance_snapshot_async( + self, + instance_id: str, + snapshot_id: str, + ) -> pai_dsw_20220101_models.DeleteInstanceSnapshotResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.delete_instance_snapshot_with_options_async(instance_id, snapshot_id, headers, runtime) + + def get_idle_instance_culler_with_options( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetIdleInstanceCullerResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetIdleInstanceCuller', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/idleinstanceculler', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetIdleInstanceCullerResponse(), + self.call_api(params, req, runtime) + ) + + async def get_idle_instance_culler_with_options_async( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> 
pai_dsw_20220101_models.GetIdleInstanceCullerResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetIdleInstanceCuller', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/idleinstanceculler', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetIdleInstanceCullerResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_idle_instance_culler( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.GetIdleInstanceCullerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_idle_instance_culler_with_options(instance_id, headers, runtime) + + async def get_idle_instance_culler_async( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.GetIdleInstanceCullerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_idle_instance_culler_with_options_async(instance_id, headers, runtime) + + def get_instance_with_options( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceResponse(), + self.call_api(params, req, runtime) + ) + + async def get_instance_with_options_async( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceResponse: + req = open_api_models.OpenApiRequest( + 
headers=headers + ) + params = open_api_models.Params( + action='GetInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_instance( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.GetInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_instance_with_options(instance_id, headers, runtime) + + async def get_instance_async( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.GetInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_instance_with_options_async(instance_id, headers, runtime) + + def get_instance_events_with_options( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetInstanceEventsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceEventsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.max_events_num): + query['MaxEventsNum'] = request.max_events_num + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetInstanceEvents', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/events', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceEventsResponse(), + 
self.call_api(params, req, runtime) + ) + + async def get_instance_events_with_options_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetInstanceEventsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceEventsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.max_events_num): + query['MaxEventsNum'] = request.max_events_num + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetInstanceEvents', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/events', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceEventsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_instance_events( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetInstanceEventsRequest, + ) -> pai_dsw_20220101_models.GetInstanceEventsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_instance_events_with_options(instance_id, request, headers, runtime) + + async def get_instance_events_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetInstanceEventsRequest, + ) -> pai_dsw_20220101_models.GetInstanceEventsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_instance_events_with_options_async(instance_id, request, headers, runtime) + + def get_instance_metrics_with_options( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetInstanceMetricsRequest, + headers: Dict[str, str], + 
runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceMetricsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.metric_type): + query['MetricType'] = request.metric_type + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + if not UtilClient.is_unset(request.time_step): + query['TimeStep'] = request.time_step + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetInstanceMetrics', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instance/{OpenApiUtilClient.get_encode_param(instance_id)}/metrics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceMetricsResponse(), + self.call_api(params, req, runtime) + ) + + async def get_instance_metrics_with_options_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetInstanceMetricsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceMetricsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.metric_type): + query['MetricType'] = request.metric_type + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + if not UtilClient.is_unset(request.time_step): + query['TimeStep'] = request.time_step + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetInstanceMetrics', + version='2022-01-01', + protocol='HTTPS', + 
pathname=f'/api/v2/instance/{OpenApiUtilClient.get_encode_param(instance_id)}/metrics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceMetricsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_instance_metrics( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetInstanceMetricsRequest, + ) -> pai_dsw_20220101_models.GetInstanceMetricsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_instance_metrics_with_options(instance_id, request, headers, runtime) + + async def get_instance_metrics_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetInstanceMetricsRequest, + ) -> pai_dsw_20220101_models.GetInstanceMetricsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_instance_metrics_with_options_async(instance_id, request, headers, runtime) + + def get_instance_shutdown_timer_with_options( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceShutdownTimerResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetInstanceShutdownTimer', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/shutdowntimer', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceShutdownTimerResponse(), + self.call_api(params, req, runtime) + ) + + async def get_instance_shutdown_timer_with_options_async( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceShutdownTimerResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = 
open_api_models.Params( + action='GetInstanceShutdownTimer', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/shutdowntimer', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceShutdownTimerResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_instance_shutdown_timer( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.GetInstanceShutdownTimerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_instance_shutdown_timer_with_options(instance_id, headers, runtime) + + async def get_instance_shutdown_timer_async( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.GetInstanceShutdownTimerResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_instance_shutdown_timer_with_options_async(instance_id, headers, runtime) + + def get_instance_snapshot_with_options( + self, + instance_id: str, + snapshot_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceSnapshotResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetInstanceSnapshot', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/snapshots/{OpenApiUtilClient.get_encode_param(snapshot_id)}', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceSnapshotResponse(), + self.call_api(params, req, runtime) + ) + + async def get_instance_snapshot_with_options_async( + self, + instance_id: str, + snapshot_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetInstanceSnapshotResponse: + 
req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetInstanceSnapshot', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/snapshots/{OpenApiUtilClient.get_encode_param(snapshot_id)}', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetInstanceSnapshotResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_instance_snapshot( + self, + instance_id: str, + snapshot_id: str, + ) -> pai_dsw_20220101_models.GetInstanceSnapshotResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_instance_snapshot_with_options(instance_id, snapshot_id, headers, runtime) + + async def get_instance_snapshot_async( + self, + instance_id: str, + snapshot_id: str, + ) -> pai_dsw_20220101_models.GetInstanceSnapshotResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_instance_snapshot_with_options_async(instance_id, snapshot_id, headers, runtime) + + def get_lifecycle_with_options( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetLifecycleRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetLifecycleResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.limit): + query['Limit'] = request.limit + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.session_number): + query['SessionNumber'] = request.session_number + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = 
open_api_models.Params( + action='GetLifecycle', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/lifecycle', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetLifecycleResponse(), + self.call_api(params, req, runtime) + ) + + async def get_lifecycle_with_options_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetLifecycleRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetLifecycleResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.limit): + query['Limit'] = request.limit + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.session_number): + query['SessionNumber'] = request.session_number + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetLifecycle', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/lifecycle', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetLifecycleResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_lifecycle( + self, + instance_id: str, + request: pai_dsw_20220101_models.GetLifecycleRequest, + ) -> pai_dsw_20220101_models.GetLifecycleResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_lifecycle_with_options(instance_id, request, headers, runtime) + + async def get_lifecycle_async( + 
self, + instance_id: str, + request: pai_dsw_20220101_models.GetLifecycleRequest, + ) -> pai_dsw_20220101_models.GetLifecycleResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_lifecycle_with_options_async(instance_id, request, headers, runtime) + + def get_resource_group_statistics_with_options( + self, + request: pai_dsw_20220101_models.GetResourceGroupStatisticsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetResourceGroupStatisticsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.resource_id): + query['ResourceId'] = request.resource_id + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetResourceGroupStatistics', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/resourcegroupstatistics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetResourceGroupStatisticsResponse(), + self.call_api(params, req, runtime) + ) + + async def get_resource_group_statistics_with_options_async( + self, + request: pai_dsw_20220101_models.GetResourceGroupStatisticsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetResourceGroupStatisticsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.resource_id): + query['ResourceId'] = request.resource_id + if not 
UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetResourceGroupStatistics', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/resourcegroupstatistics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetResourceGroupStatisticsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_resource_group_statistics( + self, + request: pai_dsw_20220101_models.GetResourceGroupStatisticsRequest, + ) -> pai_dsw_20220101_models.GetResourceGroupStatisticsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_resource_group_statistics_with_options(request, headers, runtime) + + async def get_resource_group_statistics_async( + self, + request: pai_dsw_20220101_models.GetResourceGroupStatisticsRequest, + ) -> pai_dsw_20220101_models.GetResourceGroupStatisticsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_resource_group_statistics_with_options_async(request, headers, runtime) + + def get_token_with_options( + self, + request: pai_dsw_20220101_models.GetTokenRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetTokenResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.expire_time): + query['ExpireTime'] = request.expire_time + if not UtilClient.is_unset(request.instance_id): + query['InstanceId'] = request.instance_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetToken', + version='2022-01-01', + 
protocol='HTTPS', + pathname=f'/api/v2/tokens', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetTokenResponse(), + self.call_api(params, req, runtime) + ) + + async def get_token_with_options_async( + self, + request: pai_dsw_20220101_models.GetTokenRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetTokenResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.expire_time): + query['ExpireTime'] = request.expire_time + if not UtilClient.is_unset(request.instance_id): + query['InstanceId'] = request.instance_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetToken', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/tokens', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetTokenResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_token( + self, + request: pai_dsw_20220101_models.GetTokenRequest, + ) -> pai_dsw_20220101_models.GetTokenResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_token_with_options(request, headers, runtime) + + async def get_token_async( + self, + request: pai_dsw_20220101_models.GetTokenRequest, + ) -> pai_dsw_20220101_models.GetTokenResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_token_with_options_async(request, headers, runtime) + + def get_user_config_with_options( + self, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetUserConfigResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetUserConfig', + 
version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/userconfig', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetUserConfigResponse(), + self.call_api(params, req, runtime) + ) + + async def get_user_config_with_options_async( + self, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.GetUserConfigResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetUserConfig', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/userconfig', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.GetUserConfigResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_user_config(self) -> pai_dsw_20220101_models.GetUserConfigResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_user_config_with_options(headers, runtime) + + async def get_user_config_async(self) -> pai_dsw_20220101_models.GetUserConfigResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_user_config_with_options_async(headers, runtime) + + def list_ecs_specs_with_options( + self, + request: pai_dsw_20220101_models.ListEcsSpecsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.ListEcsSpecsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.accelerator_type): + query['AcceleratorType'] = request.accelerator_type + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not 
UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListEcsSpecs', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/ecsspecs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.ListEcsSpecsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_ecs_specs_with_options_async( + self, + request: pai_dsw_20220101_models.ListEcsSpecsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.ListEcsSpecsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.accelerator_type): + query['AcceleratorType'] = request.accelerator_type + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListEcsSpecs', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/ecsspecs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.ListEcsSpecsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_ecs_specs( + self, + request: pai_dsw_20220101_models.ListEcsSpecsRequest, + ) -> pai_dsw_20220101_models.ListEcsSpecsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_ecs_specs_with_options(request, headers, runtime) + + 
async def list_ecs_specs_async( + self, + request: pai_dsw_20220101_models.ListEcsSpecsRequest, + ) -> pai_dsw_20220101_models.ListEcsSpecsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_ecs_specs_with_options_async(request, headers, runtime) + + def list_instance_snapshot_with_options( + self, + instance_id: str, + request: pai_dsw_20220101_models.ListInstanceSnapshotRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.ListInstanceSnapshotResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListInstanceSnapshot', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/snapshots', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.ListInstanceSnapshotResponse(), + self.call_api(params, req, runtime) + ) + + async def list_instance_snapshot_with_options_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.ListInstanceSnapshotRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.ListInstanceSnapshotResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not 
UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListInstanceSnapshot', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/snapshots', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.ListInstanceSnapshotResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_instance_snapshot( + self, + instance_id: str, + request: pai_dsw_20220101_models.ListInstanceSnapshotRequest, + ) -> pai_dsw_20220101_models.ListInstanceSnapshotResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_instance_snapshot_with_options(instance_id, request, headers, runtime) + + async def list_instance_snapshot_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.ListInstanceSnapshotRequest, + ) -> pai_dsw_20220101_models.ListInstanceSnapshotResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_instance_snapshot_with_options_async(instance_id, request, headers, runtime) + + def list_instance_statistics_with_options( + self, + request: pai_dsw_20220101_models.ListInstanceStatisticsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.ListInstanceStatisticsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListInstanceStatistics', + version='2022-01-01', + 
protocol='HTTPS', + pathname=f'/api/v2/instancestatistics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.ListInstanceStatisticsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_instance_statistics_with_options_async( + self, + request: pai_dsw_20220101_models.ListInstanceStatisticsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.ListInstanceStatisticsResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListInstanceStatistics', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instancestatistics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.ListInstanceStatisticsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_instance_statistics( + self, + request: pai_dsw_20220101_models.ListInstanceStatisticsRequest, + ) -> pai_dsw_20220101_models.ListInstanceStatisticsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_instance_statistics_with_options(request, headers, runtime) + + async def list_instance_statistics_async( + self, + request: pai_dsw_20220101_models.ListInstanceStatisticsRequest, + ) -> pai_dsw_20220101_models.ListInstanceStatisticsResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_instance_statistics_with_options_async(request, headers, runtime) + + def list_instances_with_options( + self, + request: pai_dsw_20220101_models.ListInstancesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + 
) -> pai_dsw_20220101_models.ListInstancesResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.accelerator_type): + query['AcceleratorType'] = request.accelerator_type + if not UtilClient.is_unset(request.accessibility): + query['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.instance_id): + query['InstanceId'] = request.instance_id + if not UtilClient.is_unset(request.instance_name): + query['InstanceName'] = request.instance_name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.payment_type): + query['PaymentType'] = request.payment_type + if not UtilClient.is_unset(request.resource_id): + query['ResourceId'] = request.resource_id + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListInstances', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.ListInstancesResponse(), + self.call_api(params, req, runtime) + ) + + async def list_instances_with_options_async( + self, + request: pai_dsw_20220101_models.ListInstancesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.ListInstancesResponse: + UtilClient.validate_model(request) + query = {} + if not 
UtilClient.is_unset(request.accelerator_type): + query['AcceleratorType'] = request.accelerator_type + if not UtilClient.is_unset(request.accessibility): + query['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.instance_id): + query['InstanceId'] = request.instance_id + if not UtilClient.is_unset(request.instance_name): + query['InstanceName'] = request.instance_name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.payment_type): + query['PaymentType'] = request.payment_type + if not UtilClient.is_unset(request.resource_id): + query['ResourceId'] = request.resource_id + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListInstances', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.ListInstancesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_instances( + self, + request: pai_dsw_20220101_models.ListInstancesRequest, + ) -> pai_dsw_20220101_models.ListInstancesResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_instances_with_options(request, headers, runtime) + + async def list_instances_async( + self, + request: pai_dsw_20220101_models.ListInstancesRequest, + ) -> 
pai_dsw_20220101_models.ListInstancesResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_instances_with_options_async(request, headers, runtime) + + def start_instance_with_options( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.StartInstanceResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='StartInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/start', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.StartInstanceResponse(), + self.call_api(params, req, runtime) + ) + + async def start_instance_with_options_async( + self, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.StartInstanceResponse: + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='StartInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/start', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.StartInstanceResponse(), + await self.call_api_async(params, req, runtime) + ) + + def start_instance( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.StartInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.start_instance_with_options(instance_id, headers, runtime) + + async def start_instance_async( + self, + instance_id: str, + ) -> pai_dsw_20220101_models.StartInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.start_instance_with_options_async(instance_id, 
headers, runtime) + + def stop_instance_with_options( + self, + instance_id: str, + request: pai_dsw_20220101_models.StopInstanceRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.StopInstanceResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.save_image): + query['SaveImage'] = request.save_image + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='StopInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/stop', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.StopInstanceResponse(), + self.call_api(params, req, runtime) + ) + + async def stop_instance_with_options_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.StopInstanceRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.StopInstanceResponse: + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.save_image): + query['SaveImage'] = request.save_image + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='StopInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/stop', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.StopInstanceResponse(), + await self.call_api_async(params, req, runtime) + ) + + def stop_instance( + self, + instance_id: str, + request: pai_dsw_20220101_models.StopInstanceRequest, + ) -> pai_dsw_20220101_models.StopInstanceResponse: + runtime = 
util_models.RuntimeOptions() + headers = {} + return self.stop_instance_with_options(instance_id, request, headers, runtime) + + async def stop_instance_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.StopInstanceRequest, + ) -> pai_dsw_20220101_models.StopInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return await self.stop_instance_with_options_async(instance_id, request, headers, runtime) + + def update_instance_with_options( + self, + instance_id: str, + request: pai_dsw_20220101_models.UpdateInstanceRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.UpdateInstanceResponse: + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.accessibility): + body['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.cloud_disks): + body['CloudDisks'] = request.cloud_disks + if not UtilClient.is_unset(request.datasets): + body['Datasets'] = request.datasets + if not UtilClient.is_unset(request.disassociate_datasets): + body['DisassociateDatasets'] = request.disassociate_datasets + if not UtilClient.is_unset(request.disassociate_driver): + body['DisassociateDriver'] = request.disassociate_driver + if not UtilClient.is_unset(request.disassociate_forward_infos): + body['DisassociateForwardInfos'] = request.disassociate_forward_infos + if not UtilClient.is_unset(request.disassociate_vpc): + body['DisassociateVpc'] = request.disassociate_vpc + if not UtilClient.is_unset(request.driver): + body['Driver'] = request.driver + if not UtilClient.is_unset(request.ecs_spec): + body['EcsSpec'] = request.ecs_spec + if not UtilClient.is_unset(request.image_id): + body['ImageId'] = request.image_id + if not UtilClient.is_unset(request.image_url): + body['ImageUrl'] = request.image_url + if not UtilClient.is_unset(request.instance_name): + body['InstanceName'] = request.instance_name + if not UtilClient.is_unset(request.priority): + 
body['Priority'] = request.priority + if not UtilClient.is_unset(request.requested_resource): + body['RequestedResource'] = request.requested_resource + if not UtilClient.is_unset(request.user_id): + body['UserId'] = request.user_id + if not UtilClient.is_unset(request.user_vpc): + body['UserVpc'] = request.user_vpc + if not UtilClient.is_unset(request.workspace_source): + body['WorkspaceSource'] = request.workspace_source + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.UpdateInstanceResponse(), + self.call_api(params, req, runtime) + ) + + async def update_instance_with_options_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.UpdateInstanceRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_dsw_20220101_models.UpdateInstanceResponse: + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.accessibility): + body['Accessibility'] = request.accessibility + if not UtilClient.is_unset(request.cloud_disks): + body['CloudDisks'] = request.cloud_disks + if not UtilClient.is_unset(request.datasets): + body['Datasets'] = request.datasets + if not UtilClient.is_unset(request.disassociate_datasets): + body['DisassociateDatasets'] = request.disassociate_datasets + if not UtilClient.is_unset(request.disassociate_driver): + body['DisassociateDriver'] = request.disassociate_driver + if not UtilClient.is_unset(request.disassociate_forward_infos): + body['DisassociateForwardInfos'] = request.disassociate_forward_infos + if not UtilClient.is_unset(request.disassociate_vpc): + body['DisassociateVpc'] = 
request.disassociate_vpc + if not UtilClient.is_unset(request.driver): + body['Driver'] = request.driver + if not UtilClient.is_unset(request.ecs_spec): + body['EcsSpec'] = request.ecs_spec + if not UtilClient.is_unset(request.image_id): + body['ImageId'] = request.image_id + if not UtilClient.is_unset(request.image_url): + body['ImageUrl'] = request.image_url + if not UtilClient.is_unset(request.instance_name): + body['InstanceName'] = request.instance_name + if not UtilClient.is_unset(request.priority): + body['Priority'] = request.priority + if not UtilClient.is_unset(request.requested_resource): + body['RequestedResource'] = request.requested_resource + if not UtilClient.is_unset(request.user_id): + body['UserId'] = request.user_id + if not UtilClient.is_unset(request.user_vpc): + body['UserVpc'] = request.user_vpc + if not UtilClient.is_unset(request.workspace_source): + body['WorkspaceSource'] = request.workspace_source + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateInstance', + version='2022-01-01', + protocol='HTTPS', + pathname=f'/api/v2/instances/{OpenApiUtilClient.get_encode_param(instance_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_dsw_20220101_models.UpdateInstanceResponse(), + await self.call_api_async(params, req, runtime) + ) + + def update_instance( + self, + instance_id: str, + request: pai_dsw_20220101_models.UpdateInstanceRequest, + ) -> pai_dsw_20220101_models.UpdateInstanceResponse: + runtime = util_models.RuntimeOptions() + headers = {} + return self.update_instance_with_options(instance_id, request, headers, runtime) + + async def update_instance_async( + self, + instance_id: str, + request: pai_dsw_20220101_models.UpdateInstanceRequest, + ) -> pai_dsw_20220101_models.UpdateInstanceResponse: + runtime = util_models.RuntimeOptions() + headers 
= {} + return await self.update_instance_with_options_async(instance_id, request, headers, runtime) diff --git a/pai/libs/alibabacloud_pai_dsw20220101/models.py b/pai/libs/alibabacloud_pai_dsw20220101/models.py new file mode 100644 index 0000000..53599e2 --- /dev/null +++ b/pai/libs/alibabacloud_pai_dsw20220101/models.py @@ -0,0 +1,6444 @@ +# -*- coding: utf-8 -*- +# This file is auto-generated, don't edit it. Thanks. +from Tea.model import TeaModel +from typing import List, Dict + + +class DemoCategory(TeaModel): + def __init__( + self, + category_code: str = None, + category_name: str = None, + order: int = None, + sub_categories: List['DemoCategory'] = None, + ): + self.category_code = category_code + self.category_name = category_name + self.order = order + self.sub_categories = sub_categories + + def validate(self): + if self.sub_categories: + for k in self.sub_categories: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.category_code is not None: + result['CategoryCode'] = self.category_code + if self.category_name is not None: + result['CategoryName'] = self.category_name + if self.order is not None: + result['Order'] = self.order + result['SubCategories'] = [] + if self.sub_categories is not None: + for k in self.sub_categories: + result['SubCategories'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CategoryCode') is not None: + self.category_code = m.get('CategoryCode') + if m.get('CategoryName') is not None: + self.category_name = m.get('CategoryName') + if m.get('Order') is not None: + self.order = m.get('Order') + self.sub_categories = [] + if m.get('SubCategories') is not None: + for k in m.get('SubCategories'): + temp_model = DemoCategory() + self.sub_categories.append(temp_model.from_map(k)) + return self + + +class ForwardInfo(TeaModel): + def __init__( + self, + container_name: str = 
None, + eip_allocation_id: str = None, + enable: bool = None, + nat_gateway_id: str = None, + port: str = None, + sshpublic_key: str = None, + ): + self.container_name = container_name + self.eip_allocation_id = eip_allocation_id + self.enable = enable + self.nat_gateway_id = nat_gateway_id + self.port = port + self.sshpublic_key = sshpublic_key + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.container_name is not None: + result['ContainerName'] = self.container_name + if self.eip_allocation_id is not None: + result['EipAllocationId'] = self.eip_allocation_id + if self.enable is not None: + result['Enable'] = self.enable + if self.nat_gateway_id is not None: + result['NatGatewayId'] = self.nat_gateway_id + if self.port is not None: + result['Port'] = self.port + if self.sshpublic_key is not None: + result['SSHPublicKey'] = self.sshpublic_key + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ContainerName') is not None: + self.container_name = m.get('ContainerName') + if m.get('EipAllocationId') is not None: + self.eip_allocation_id = m.get('EipAllocationId') + if m.get('Enable') is not None: + self.enable = m.get('Enable') + if m.get('NatGatewayId') is not None: + self.nat_gateway_id = m.get('NatGatewayId') + if m.get('Port') is not None: + self.port = m.get('Port') + if m.get('SSHPublicKey') is not None: + self.sshpublic_key = m.get('SSHPublicKey') + return self + + +class ForwardInfoResponseConnectInfoInternet(TeaModel): + def __init__( + self, + endpoint: str = None, + port: str = None, + ): + self.endpoint = endpoint + self.port = port + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.endpoint is not None: + result['Endpoint'] = self.endpoint + if self.port is not None: + result['Port'] = self.port + return result + + def 
from_map(self, m: dict = None): + m = m or dict() + if m.get('Endpoint') is not None: + self.endpoint = m.get('Endpoint') + if m.get('Port') is not None: + self.port = m.get('Port') + return self + + +class ForwardInfoResponseConnectInfoIntranet(TeaModel): + def __init__( + self, + endpoint: str = None, + port: str = None, + ): + self.endpoint = endpoint + self.port = port + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.endpoint is not None: + result['Endpoint'] = self.endpoint + if self.port is not None: + result['Port'] = self.port + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Endpoint') is not None: + self.endpoint = m.get('Endpoint') + if m.get('Port') is not None: + self.port = m.get('Port') + return self + + +class ForwardInfoResponseConnectInfo(TeaModel): + def __init__( + self, + internet: ForwardInfoResponseConnectInfoInternet = None, + intranet: ForwardInfoResponseConnectInfoIntranet = None, + message: str = None, + phase: str = None, + ): + self.internet = internet + self.intranet = intranet + self.message = message + self.phase = phase + + def validate(self): + if self.internet: + self.internet.validate() + if self.intranet: + self.intranet.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.internet is not None: + result['Internet'] = self.internet.to_map() + if self.intranet is not None: + result['Intranet'] = self.intranet.to_map() + if self.message is not None: + result['Message'] = self.message + if self.phase is not None: + result['Phase'] = self.phase + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Internet') is not None: + temp_model = ForwardInfoResponseConnectInfoInternet() + self.internet = temp_model.from_map(m['Internet']) + if m.get('Intranet') is not None: + temp_model = 
ForwardInfoResponseConnectInfoIntranet() + self.intranet = temp_model.from_map(m['Intranet']) + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('Phase') is not None: + self.phase = m.get('Phase') + return self + + +class ForwardInfoResponse(TeaModel): + def __init__( + self, + connect_info: ForwardInfoResponseConnectInfo = None, + container_name: str = None, + eip_allocation_id: str = None, + enable: bool = None, + nat_gateway_id: str = None, + port: str = None, + sshpublic_key: str = None, + ): + self.connect_info = connect_info + self.container_name = container_name + self.eip_allocation_id = eip_allocation_id + self.enable = enable + self.nat_gateway_id = nat_gateway_id + self.port = port + self.sshpublic_key = sshpublic_key + + def validate(self): + if self.connect_info: + self.connect_info.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.connect_info is not None: + result['ConnectInfo'] = self.connect_info.to_map() + if self.container_name is not None: + result['ContainerName'] = self.container_name + if self.eip_allocation_id is not None: + result['EipAllocationId'] = self.eip_allocation_id + if self.enable is not None: + result['Enable'] = self.enable + if self.nat_gateway_id is not None: + result['NatGatewayId'] = self.nat_gateway_id + if self.port is not None: + result['Port'] = self.port + if self.sshpublic_key is not None: + result['SSHPublicKey'] = self.sshpublic_key + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ConnectInfo') is not None: + temp_model = ForwardInfoResponseConnectInfo() + self.connect_info = temp_model.from_map(m['ConnectInfo']) + if m.get('ContainerName') is not None: + self.container_name = m.get('ContainerName') + if m.get('EipAllocationId') is not None: + self.eip_allocation_id = m.get('EipAllocationId') + if m.get('Enable') is not None: + self.enable = m.get('Enable') + if 
m.get('NatGatewayId') is not None: + self.nat_gateway_id = m.get('NatGatewayId') + if m.get('Port') is not None: + self.port = m.get('Port') + if m.get('SSHPublicKey') is not None: + self.sshpublic_key = m.get('SSHPublicKey') + return self + + +class CreateIdleInstanceCullerRequest(TeaModel): + def __init__( + self, + cpu_percent_threshold: int = None, + gpu_percent_threshold: int = None, + max_idle_time_in_minutes: int = None, + ): + self.cpu_percent_threshold = cpu_percent_threshold + self.gpu_percent_threshold = gpu_percent_threshold + self.max_idle_time_in_minutes = max_idle_time_in_minutes + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.cpu_percent_threshold is not None: + result['CpuPercentThreshold'] = self.cpu_percent_threshold + if self.gpu_percent_threshold is not None: + result['GpuPercentThreshold'] = self.gpu_percent_threshold + if self.max_idle_time_in_minutes is not None: + result['MaxIdleTimeInMinutes'] = self.max_idle_time_in_minutes + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CpuPercentThreshold') is not None: + self.cpu_percent_threshold = m.get('CpuPercentThreshold') + if m.get('GpuPercentThreshold') is not None: + self.gpu_percent_threshold = m.get('GpuPercentThreshold') + if m.get('MaxIdleTimeInMinutes') is not None: + self.max_idle_time_in_minutes = m.get('MaxIdleTimeInMinutes') + return self + + +class CreateIdleInstanceCullerResponseBody(TeaModel): + def __init__( + self, + code: str = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.code = code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] 
= self.code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class CreateIdleInstanceCullerResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateIdleInstanceCullerResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateIdleInstanceCullerResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateInstanceRequestCloudDisksStatus(TeaModel): + def __init__( + self, + available: int = None, + capacity: int = None, + usage: int = None, + ): + self.available = available + self.capacity = capacity + self.usage = usage + + def 
validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.available is not None: + result['Available'] = self.available + if self.capacity is not None: + result['Capacity'] = self.capacity + if self.usage is not None: + result['Usage'] = self.usage + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Available') is not None: + self.available = m.get('Available') + if m.get('Capacity') is not None: + self.capacity = m.get('Capacity') + if m.get('Usage') is not None: + self.usage = m.get('Usage') + return self + + +class CreateInstanceRequestCloudDisks(TeaModel): + def __init__( + self, + capacity: str = None, + mount_path: str = None, + path: str = None, + status: CreateInstanceRequestCloudDisksStatus = None, + sub_type: str = None, + ): + self.capacity = capacity + self.mount_path = mount_path + self.path = path + self.status = status + self.sub_type = sub_type + + def validate(self): + if self.status: + self.status.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.capacity is not None: + result['Capacity'] = self.capacity + if self.mount_path is not None: + result['MountPath'] = self.mount_path + if self.path is not None: + result['Path'] = self.path + if self.status is not None: + result['Status'] = self.status.to_map() + if self.sub_type is not None: + result['SubType'] = self.sub_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Capacity') is not None: + self.capacity = m.get('Capacity') + if m.get('MountPath') is not None: + self.mount_path = m.get('MountPath') + if m.get('Path') is not None: + self.path = m.get('Path') + if m.get('Status') is not None: + temp_model = CreateInstanceRequestCloudDisksStatus() + self.status = temp_model.from_map(m['Status']) + if m.get('SubType') is not None: + self.sub_type = m.get('SubType') + return 
self + + +class CreateInstanceRequestDatasets(TeaModel): + def __init__( + self, + dataset_id: str = None, + mount_path: str = None, + ): + self.dataset_id = dataset_id + self.mount_path = mount_path + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.mount_path is not None: + result['MountPath'] = self.mount_path + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('MountPath') is not None: + self.mount_path = m.get('MountPath') + return self + + +class CreateInstanceRequestLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class CreateInstanceRequestRequestedResource(TeaModel): + def __init__( + self, + cpu: str = None, + gpu: str = None, + gputype: str = None, + memory: str = None, + shared_memory: str = None, + ): + self.cpu = cpu + self.gpu = gpu + self.gputype = gputype + self.memory = memory + self.shared_memory = shared_memory + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.cpu is not None: + result['CPU'] = self.cpu + if self.gpu is not None: + result['GPU'] = self.gpu + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.memory is not None: 
+ result['Memory'] = self.memory + if self.shared_memory is not None: + result['SharedMemory'] = self.shared_memory + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CPU') is not None: + self.cpu = m.get('CPU') + if m.get('GPU') is not None: + self.gpu = m.get('GPU') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('Memory') is not None: + self.memory = m.get('Memory') + if m.get('SharedMemory') is not None: + self.shared_memory = m.get('SharedMemory') + return self + + +class CreateInstanceRequestUserVpc(TeaModel): + def __init__( + self, + default_route: str = None, + extended_cidrs: List[str] = None, + forward_infos: List[ForwardInfo] = None, + security_group_id: str = None, + v_switch_id: str = None, + vpc_id: str = None, + ): + self.default_route = default_route + self.extended_cidrs = extended_cidrs + self.forward_infos = forward_infos + self.security_group_id = security_group_id + self.v_switch_id = v_switch_id + self.vpc_id = vpc_id + + def validate(self): + if self.forward_infos: + for k in self.forward_infos: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.default_route is not None: + result['DefaultRoute'] = self.default_route + if self.extended_cidrs is not None: + result['ExtendedCIDRs'] = self.extended_cidrs + result['ForwardInfos'] = [] + if self.forward_infos is not None: + for k in self.forward_infos: + result['ForwardInfos'].append(k.to_map() if k else None) + if self.security_group_id is not None: + result['SecurityGroupId'] = self.security_group_id + if self.v_switch_id is not None: + result['VSwitchId'] = self.v_switch_id + if self.vpc_id is not None: + result['VpcId'] = self.vpc_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DefaultRoute') is not None: + self.default_route = m.get('DefaultRoute') + if m.get('ExtendedCIDRs') is not None: + 
self.extended_cidrs = m.get('ExtendedCIDRs') + self.forward_infos = [] + if m.get('ForwardInfos') is not None: + for k in m.get('ForwardInfos'): + temp_model = ForwardInfo() + self.forward_infos.append(temp_model.from_map(k)) + if m.get('SecurityGroupId') is not None: + self.security_group_id = m.get('SecurityGroupId') + if m.get('VSwitchId') is not None: + self.v_switch_id = m.get('VSwitchId') + if m.get('VpcId') is not None: + self.vpc_id = m.get('VpcId') + return self + + +class CreateInstanceRequest(TeaModel): + def __init__( + self, + accessibility: str = None, + cloud_disks: List[CreateInstanceRequestCloudDisks] = None, + datasets: List[CreateInstanceRequestDatasets] = None, + driver: str = None, + ecs_spec: str = None, + environment_variables: Dict[str, str] = None, + image_id: str = None, + image_url: str = None, + instance_name: str = None, + labels: List[CreateInstanceRequestLabels] = None, + priority: int = None, + requested_resource: CreateInstanceRequestRequestedResource = None, + resource_id: str = None, + user_id: str = None, + user_vpc: CreateInstanceRequestUserVpc = None, + workspace_id: str = None, + workspace_source: str = None, + ): + self.accessibility = accessibility + self.cloud_disks = cloud_disks + self.datasets = datasets + self.driver = driver + self.ecs_spec = ecs_spec + self.environment_variables = environment_variables + self.image_id = image_id + self.image_url = image_url + self.instance_name = instance_name + self.labels = labels + self.priority = priority + self.requested_resource = requested_resource + self.resource_id = resource_id + self.user_id = user_id + self.user_vpc = user_vpc + self.workspace_id = workspace_id + self.workspace_source = workspace_source + + def validate(self): + if self.cloud_disks: + for k in self.cloud_disks: + if k: + k.validate() + if self.datasets: + for k in self.datasets: + if k: + k.validate() + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.requested_resource: + 
self.requested_resource.validate() + if self.user_vpc: + self.user_vpc.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + result['CloudDisks'] = [] + if self.cloud_disks is not None: + for k in self.cloud_disks: + result['CloudDisks'].append(k.to_map() if k else None) + result['Datasets'] = [] + if self.datasets is not None: + for k in self.datasets: + result['Datasets'].append(k.to_map() if k else None) + if self.driver is not None: + result['Driver'] = self.driver + if self.ecs_spec is not None: + result['EcsSpec'] = self.ecs_spec + if self.environment_variables is not None: + result['EnvironmentVariables'] = self.environment_variables + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.instance_name is not None: + result['InstanceName'] = self.instance_name + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.priority is not None: + result['Priority'] = self.priority + if self.requested_resource is not None: + result['RequestedResource'] = self.requested_resource.to_map() + if self.resource_id is not None: + result['ResourceId'] = self.resource_id + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_vpc is not None: + result['UserVpc'] = self.user_vpc.to_map() + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + if self.workspace_source is not None: + result['WorkspaceSource'] = self.workspace_source + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + self.cloud_disks = [] + if m.get('CloudDisks') is not None: + for k in m.get('CloudDisks'): + temp_model = 
CreateInstanceRequestCloudDisks() + self.cloud_disks.append(temp_model.from_map(k)) + self.datasets = [] + if m.get('Datasets') is not None: + for k in m.get('Datasets'): + temp_model = CreateInstanceRequestDatasets() + self.datasets.append(temp_model.from_map(k)) + if m.get('Driver') is not None: + self.driver = m.get('Driver') + if m.get('EcsSpec') is not None: + self.ecs_spec = m.get('EcsSpec') + if m.get('EnvironmentVariables') is not None: + self.environment_variables = m.get('EnvironmentVariables') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + if m.get('InstanceName') is not None: + self.instance_name = m.get('InstanceName') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = CreateInstanceRequestLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('Priority') is not None: + self.priority = m.get('Priority') + if m.get('RequestedResource') is not None: + temp_model = CreateInstanceRequestRequestedResource() + self.requested_resource = temp_model.from_map(m['RequestedResource']) + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserVpc') is not None: + temp_model = CreateInstanceRequestUserVpc() + self.user_vpc = temp_model.from_map(m['UserVpc']) + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + if m.get('WorkspaceSource') is not None: + self.workspace_source = m.get('WorkspaceSource') + return self + + +class CreateInstanceResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.code = code + self.http_status_code = http_status_code + self.instance_id = instance_id + self.message = message + self.request_id = 
request_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class CreateInstanceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateInstanceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if 
m.get('body') is not None: + temp_model = CreateInstanceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateInstanceShutdownTimerRequest(TeaModel): + def __init__( + self, + due_time: str = None, + remaining_time_in_ms: int = None, + ): + self.due_time = due_time + self.remaining_time_in_ms = remaining_time_in_ms + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.due_time is not None: + result['DueTime'] = self.due_time + if self.remaining_time_in_ms is not None: + result['RemainingTimeInMs'] = self.remaining_time_in_ms + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DueTime') is not None: + self.due_time = m.get('DueTime') + if m.get('RemainingTimeInMs') is not None: + self.remaining_time_in_ms = m.get('RemainingTimeInMs') + return self + + +class CreateInstanceShutdownTimerResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.code = code + self.http_status_code = http_status_code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') 
is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class CreateInstanceShutdownTimerResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateInstanceShutdownTimerResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateInstanceShutdownTimerResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class CreateInstanceSnapshotRequestLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key 
= m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class CreateInstanceSnapshotRequest(TeaModel): + def __init__( + self, + exclude_paths: List[str] = None, + image_url: str = None, + labels: List[CreateInstanceSnapshotRequestLabels] = None, + overwrite: bool = None, + snapshot_description: str = None, + snapshot_name: str = None, + ): + self.exclude_paths = exclude_paths + self.image_url = image_url + self.labels = labels + self.overwrite = overwrite + self.snapshot_description = snapshot_description + self.snapshot_name = snapshot_name + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.exclude_paths is not None: + result['ExcludePaths'] = self.exclude_paths + if self.image_url is not None: + result['ImageUrl'] = self.image_url + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.overwrite is not None: + result['Overwrite'] = self.overwrite + if self.snapshot_description is not None: + result['SnapshotDescription'] = self.snapshot_description + if self.snapshot_name is not None: + result['SnapshotName'] = self.snapshot_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ExcludePaths') is not None: + self.exclude_paths = m.get('ExcludePaths') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = CreateInstanceSnapshotRequestLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('Overwrite') is not None: + self.overwrite = m.get('Overwrite') + if m.get('SnapshotDescription') is not None: + self.snapshot_description = m.get('SnapshotDescription') + if m.get('SnapshotName') is not None: + self.snapshot_name = 
m.get('SnapshotName') + return self + + +class CreateInstanceSnapshotResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + snapshot_id: str = None, + success: bool = None, + ): + self.code = code + self.http_status_code = http_status_code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.snapshot_id = snapshot_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('SnapshotId') is not None: + self.snapshot_id = m.get('SnapshotId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class CreateInstanceSnapshotResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateInstanceSnapshotResponseBody = None, + ): + self.headers = headers + 
self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateInstanceSnapshotResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteIdleInstanceCullerResponseBody(TeaModel): + def __init__( + self, + code: str = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.code = code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not 
None: + self.success = m.get('Success') + return self + + +class DeleteIdleInstanceCullerResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteIdleInstanceCullerResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteIdleInstanceCullerResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteInstanceResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.code = code + self.http_status_code = http_status_code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success 
is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class DeleteInstanceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteInstanceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteInstanceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteInstanceShutdownTimerResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.code = code + self.http_status_code = http_status_code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.success = success + + def 
validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class DeleteInstanceShutdownTimerResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteInstanceShutdownTimerResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not 
None: + temp_model = DeleteInstanceShutdownTimerResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteInstanceSnapshotResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + snapshot_id: str = None, + success: bool = None, + ): + self.code = code + self.http_status_code = http_status_code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.snapshot_id = snapshot_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('SnapshotId') is not None: + self.snapshot_id = m.get('SnapshotId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class DeleteInstanceSnapshotResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: 
DeleteInstanceSnapshotResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteInstanceSnapshotResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetIdleInstanceCullerResponseBody(TeaModel): + def __init__( + self, + code: str = None, + cpu_percent_threshold: int = None, + gpu_percent_threshold: int = None, + idle_time_in_minutes: int = None, + instance_id: str = None, + max_idle_time_in_minutes: int = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.code = code + self.cpu_percent_threshold = cpu_percent_threshold + self.gpu_percent_threshold = gpu_percent_threshold + self.idle_time_in_minutes = idle_time_in_minutes + self.instance_id = instance_id + self.max_idle_time_in_minutes = max_idle_time_in_minutes + self.message = message + self.request_id = request_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.cpu_percent_threshold is not None: + result['CpuPercentThreshold'] = self.cpu_percent_threshold + if self.gpu_percent_threshold is not None: + result['GpuPercentThreshold'] = self.gpu_percent_threshold + if 
self.idle_time_in_minutes is not None: + result['IdleTimeInMinutes'] = self.idle_time_in_minutes + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.max_idle_time_in_minutes is not None: + result['MaxIdleTimeInMinutes'] = self.max_idle_time_in_minutes + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('CpuPercentThreshold') is not None: + self.cpu_percent_threshold = m.get('CpuPercentThreshold') + if m.get('GpuPercentThreshold') is not None: + self.gpu_percent_threshold = m.get('GpuPercentThreshold') + if m.get('IdleTimeInMinutes') is not None: + self.idle_time_in_minutes = m.get('IdleTimeInMinutes') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('MaxIdleTimeInMinutes') is not None: + self.max_idle_time_in_minutes = m.get('MaxIdleTimeInMinutes') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class GetIdleInstanceCullerResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetIdleInstanceCullerResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = 
self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetIdleInstanceCullerResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetInstanceResponseBodyCloudDisks(TeaModel): + def __init__( + self, + capacity: str = None, + mount_path: str = None, + path: str = None, + sub_type: str = None, + ): + self.capacity = capacity + self.mount_path = mount_path + self.path = path + self.sub_type = sub_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.capacity is not None: + result['Capacity'] = self.capacity + if self.mount_path is not None: + result['MountPath'] = self.mount_path + if self.path is not None: + result['Path'] = self.path + if self.sub_type is not None: + result['SubType'] = self.sub_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Capacity') is not None: + self.capacity = m.get('Capacity') + if m.get('MountPath') is not None: + self.mount_path = m.get('MountPath') + if m.get('Path') is not None: + self.path = m.get('Path') + if m.get('SubType') is not None: + self.sub_type = m.get('SubType') + return self + + +class GetInstanceResponseBodyDatasets(TeaModel): + def __init__( + self, + dataset_id: str = None, + mount_path: str = None, + ): + self.dataset_id = dataset_id + self.mount_path = mount_path + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.mount_path is not None: + result['MountPath'] = self.mount_path + return result + + def from_map(self, m: dict = None): + m = m or dict() + if 
m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('MountPath') is not None: + self.mount_path = m.get('MountPath') + return self + + +class GetInstanceResponseBodyIdleInstanceCuller(TeaModel): + def __init__( + self, + cpu_percent_threshold: int = None, + gpu_percent_threshold: int = None, + idle_time_in_minutes: int = None, + instance_id: str = None, + max_idle_time_in_minutes: int = None, + ): + self.cpu_percent_threshold = cpu_percent_threshold + self.gpu_percent_threshold = gpu_percent_threshold + self.idle_time_in_minutes = idle_time_in_minutes + self.instance_id = instance_id + self.max_idle_time_in_minutes = max_idle_time_in_minutes + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.cpu_percent_threshold is not None: + result['CpuPercentThreshold'] = self.cpu_percent_threshold + if self.gpu_percent_threshold is not None: + result['GpuPercentThreshold'] = self.gpu_percent_threshold + if self.idle_time_in_minutes is not None: + result['IdleTimeInMinutes'] = self.idle_time_in_minutes + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.max_idle_time_in_minutes is not None: + result['MaxIdleTimeInMinutes'] = self.max_idle_time_in_minutes + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CpuPercentThreshold') is not None: + self.cpu_percent_threshold = m.get('CpuPercentThreshold') + if m.get('GpuPercentThreshold') is not None: + self.gpu_percent_threshold = m.get('GpuPercentThreshold') + if m.get('IdleTimeInMinutes') is not None: + self.idle_time_in_minutes = m.get('IdleTimeInMinutes') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('MaxIdleTimeInMinutes') is not None: + self.max_idle_time_in_minutes = m.get('MaxIdleTimeInMinutes') + return self + + +class GetInstanceResponseBodyInstanceShutdownTimer(TeaModel): + def __init__( + 
self, + due_time: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + instance_id: str = None, + remaining_time_in_ms: int = None, + ): + self.due_time = due_time + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.instance_id = instance_id + self.remaining_time_in_ms = remaining_time_in_ms + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.due_time is not None: + result['DueTime'] = self.due_time + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.remaining_time_in_ms is not None: + result['RemainingTimeInMs'] = self.remaining_time_in_ms + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DueTime') is not None: + self.due_time = m.get('DueTime') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('RemainingTimeInMs') is not None: + self.remaining_time_in_ms = m.get('RemainingTimeInMs') + return self + + +class GetInstanceResponseBodyInstanceSnapshotList(TeaModel): + def __init__( + self, + gmt_create_time: str = None, + gmt_modified_time: str = None, + image_id: str = None, + image_name: str = None, + image_url: str = None, + reason_code: str = None, + reason_message: str = None, + repository_url: str = None, + status: str = None, + ): + # 快照创建时间 + self.gmt_create_time = gmt_create_time + # 快照修改时间 + self.gmt_modified_time = gmt_modified_time + # 镜像Id + self.image_id = image_id + # 镜像名称 + self.image_name = image_name + # 镜像Url + 
self.image_url = image_url + # 实例快照错误代码 + self.reason_code = reason_code + # 实例快照错误消息 + self.reason_message = reason_message + # 镜像仓库Url + self.repository_url = repository_url + # 实例快照状态 + self.status = status + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_name is not None: + result['ImageName'] = self.image_name + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.repository_url is not None: + result['RepositoryUrl'] = self.repository_url + if self.status is not None: + result['Status'] = self.status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageName') is not None: + self.image_name = m.get('ImageName') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('RepositoryUrl') is not None: + self.repository_url = m.get('RepositoryUrl') + if m.get('Status') is not None: + self.status = m.get('Status') + return self + + +class GetInstanceResponseBodyLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = 
key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class GetInstanceResponseBodyLatestSnapshot(TeaModel): + def __init__( + self, + gmt_create_time: str = None, + gmt_modified_time: str = None, + image_id: str = None, + image_name: str = None, + image_url: str = None, + reason_code: str = None, + reason_message: str = None, + repository_url: str = None, + status: str = None, + ): + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.image_id = image_id + self.image_name = image_name + self.image_url = image_url + # 实例快照错误代码 + self.reason_code = reason_code + # 实例快照错误消息 + self.reason_message = reason_message + self.repository_url = repository_url + # 实例快照状态 + self.status = status + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_name is not None: + result['ImageName'] = self.image_name + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.repository_url is not None: + result['RepositoryUrl'] = self.repository_url + if self.status is not None: + result['Status'] = 
self.status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageName') is not None: + self.image_name = m.get('ImageName') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('RepositoryUrl') is not None: + self.repository_url = m.get('RepositoryUrl') + if m.get('Status') is not None: + self.status = m.get('Status') + return self + + +class GetInstanceResponseBodyNodeErrorRecovery(TeaModel): + def __init__( + self, + auto_switch_countdown_seconds: int = None, + enable_auto_switch_on_node_error: bool = None, + has_node_error: bool = None, + ): + self.auto_switch_countdown_seconds = auto_switch_countdown_seconds + self.enable_auto_switch_on_node_error = enable_auto_switch_on_node_error + self.has_node_error = has_node_error + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.auto_switch_countdown_seconds is not None: + result['autoSwitchCountdownSeconds'] = self.auto_switch_countdown_seconds + if self.enable_auto_switch_on_node_error is not None: + result['enableAutoSwitchOnNodeError'] = self.enable_auto_switch_on_node_error + if self.has_node_error is not None: + result['hasNodeError'] = self.has_node_error + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('autoSwitchCountdownSeconds') is not None: + self.auto_switch_countdown_seconds = m.get('autoSwitchCountdownSeconds') + if m.get('enableAutoSwitchOnNodeError') is not None: + 
self.enable_auto_switch_on_node_error = m.get('enableAutoSwitchOnNodeError') + if m.get('hasNodeError') is not None: + self.has_node_error = m.get('hasNodeError') + return self + + +class GetInstanceResponseBodyRequestedResource(TeaModel): + def __init__( + self, + cpu: str = None, + gpu: str = None, + gputype: str = None, + memory: str = None, + shared_memory: str = None, + ): + self.cpu = cpu + self.gpu = gpu + self.gputype = gputype + self.memory = memory + self.shared_memory = shared_memory + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.cpu is not None: + result['CPU'] = self.cpu + if self.gpu is not None: + result['GPU'] = self.gpu + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.memory is not None: + result['Memory'] = self.memory + if self.shared_memory is not None: + result['SharedMemory'] = self.shared_memory + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CPU') is not None: + self.cpu = m.get('CPU') + if m.get('GPU') is not None: + self.gpu = m.get('GPU') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('Memory') is not None: + self.memory = m.get('Memory') + if m.get('SharedMemory') is not None: + self.shared_memory = m.get('SharedMemory') + return self + + +class GetInstanceResponseBodyUserVpc(TeaModel): + def __init__( + self, + default_route: str = None, + extended_cidrs: List[str] = None, + forward_infos: List[ForwardInfoResponse] = None, + security_group_id: str = None, + v_switch_id: str = None, + vpc_id: str = None, + ): + self.default_route = default_route + self.extended_cidrs = extended_cidrs + self.forward_infos = forward_infos + self.security_group_id = security_group_id + self.v_switch_id = v_switch_id + # Vpc Id。 + self.vpc_id = vpc_id + + def validate(self): + if self.forward_infos: + for k in self.forward_infos: + if k: + k.validate() + + def 
to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.default_route is not None: + result['DefaultRoute'] = self.default_route + if self.extended_cidrs is not None: + result['ExtendedCIDRs'] = self.extended_cidrs + result['ForwardInfos'] = [] + if self.forward_infos is not None: + for k in self.forward_infos: + result['ForwardInfos'].append(k.to_map() if k else None) + if self.security_group_id is not None: + result['SecurityGroupId'] = self.security_group_id + if self.v_switch_id is not None: + result['VSwitchId'] = self.v_switch_id + if self.vpc_id is not None: + result['VpcId'] = self.vpc_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DefaultRoute') is not None: + self.default_route = m.get('DefaultRoute') + if m.get('ExtendedCIDRs') is not None: + self.extended_cidrs = m.get('ExtendedCIDRs') + self.forward_infos = [] + if m.get('ForwardInfos') is not None: + for k in m.get('ForwardInfos'): + temp_model = ForwardInfoResponse() + self.forward_infos.append(temp_model.from_map(k)) + if m.get('SecurityGroupId') is not None: + self.security_group_id = m.get('SecurityGroupId') + if m.get('VSwitchId') is not None: + self.v_switch_id = m.get('VSwitchId') + if m.get('VpcId') is not None: + self.vpc_id = m.get('VpcId') + return self + + +class GetInstanceResponseBody(TeaModel): + def __init__( + self, + accelerator_type: str = None, + accessibility: str = None, + accumulated_running_time_in_ms: int = None, + cloud_disks: List[GetInstanceResponseBodyCloudDisks] = None, + code: str = None, + datasets: List[GetInstanceResponseBodyDatasets] = None, + driver: str = None, + ecs_spec: str = None, + environment_variables: Dict[str, str] = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + http_status_code: int = None, + idle_instance_culler: GetInstanceResponseBodyIdleInstanceCuller = None, + image_id: str = None, + image_name: str = None, + image_url: str = None, + 
instance_id: str = None, + instance_name: str = None, + instance_shutdown_timer: GetInstanceResponseBodyInstanceShutdownTimer = None, + instance_snapshot_list: List[GetInstanceResponseBodyInstanceSnapshotList] = None, + instance_url: str = None, + jupyterlab_url: str = None, + labels: List[GetInstanceResponseBodyLabels] = None, + latest_snapshot: GetInstanceResponseBodyLatestSnapshot = None, + message: str = None, + node_error_recovery: GetInstanceResponseBodyNodeErrorRecovery = None, + payment_type: str = None, + priority: int = None, + reason_code: str = None, + reason_message: str = None, + request_id: str = None, + requested_resource: GetInstanceResponseBodyRequestedResource = None, + resource_id: str = None, + resource_name: str = None, + status: str = None, + success: bool = None, + terminal_url: str = None, + user_id: str = None, + user_name: str = None, + user_vpc: GetInstanceResponseBodyUserVpc = None, + web_ideurl: str = None, + workspace_id: str = None, + workspace_name: str = None, + workspace_source: str = None, + ): + self.accelerator_type = accelerator_type + self.accessibility = accessibility + self.accumulated_running_time_in_ms = accumulated_running_time_in_ms + self.cloud_disks = cloud_disks + self.code = code + self.datasets = datasets + self.driver = driver + self.ecs_spec = ecs_spec + self.environment_variables = environment_variables + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.http_status_code = http_status_code + self.idle_instance_culler = idle_instance_culler + self.image_id = image_id + self.image_name = image_name + self.image_url = image_url + self.instance_id = instance_id + self.instance_name = instance_name + self.instance_shutdown_timer = instance_shutdown_timer + self.instance_snapshot_list = instance_snapshot_list + self.instance_url = instance_url + # Jupyterlab Url。 + self.jupyterlab_url = jupyterlab_url + self.labels = labels + self.latest_snapshot = latest_snapshot + 
self.message = message + self.node_error_recovery = node_error_recovery + self.payment_type = payment_type + self.priority = priority + self.reason_code = reason_code + self.reason_message = reason_message + self.request_id = request_id + self.requested_resource = requested_resource + self.resource_id = resource_id + self.resource_name = resource_name + self.status = status + self.success = success + self.terminal_url = terminal_url + self.user_id = user_id + self.user_name = user_name + self.user_vpc = user_vpc + # Web IDE url。 + self.web_ideurl = web_ideurl + self.workspace_id = workspace_id + self.workspace_name = workspace_name + self.workspace_source = workspace_source + + def validate(self): + if self.cloud_disks: + for k in self.cloud_disks: + if k: + k.validate() + if self.datasets: + for k in self.datasets: + if k: + k.validate() + if self.idle_instance_culler: + self.idle_instance_culler.validate() + if self.instance_shutdown_timer: + self.instance_shutdown_timer.validate() + if self.instance_snapshot_list: + for k in self.instance_snapshot_list: + if k: + k.validate() + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.latest_snapshot: + self.latest_snapshot.validate() + if self.node_error_recovery: + self.node_error_recovery.validate() + if self.requested_resource: + self.requested_resource.validate() + if self.user_vpc: + self.user_vpc.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accelerator_type is not None: + result['AcceleratorType'] = self.accelerator_type + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.accumulated_running_time_in_ms is not None: + result['AccumulatedRunningTimeInMs'] = self.accumulated_running_time_in_ms + result['CloudDisks'] = [] + if self.cloud_disks is not None: + for k in self.cloud_disks: + result['CloudDisks'].append(k.to_map() if k else None) + if self.code is not None: + 
result['Code'] = self.code + result['Datasets'] = [] + if self.datasets is not None: + for k in self.datasets: + result['Datasets'].append(k.to_map() if k else None) + if self.driver is not None: + result['Driver'] = self.driver + if self.ecs_spec is not None: + result['EcsSpec'] = self.ecs_spec + if self.environment_variables is not None: + result['EnvironmentVariables'] = self.environment_variables + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.idle_instance_culler is not None: + result['IdleInstanceCuller'] = self.idle_instance_culler.to_map() + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_name is not None: + result['ImageName'] = self.image_name + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.instance_name is not None: + result['InstanceName'] = self.instance_name + if self.instance_shutdown_timer is not None: + result['InstanceShutdownTimer'] = self.instance_shutdown_timer.to_map() + result['InstanceSnapshotList'] = [] + if self.instance_snapshot_list is not None: + for k in self.instance_snapshot_list: + result['InstanceSnapshotList'].append(k.to_map() if k else None) + if self.instance_url is not None: + result['InstanceUrl'] = self.instance_url + if self.jupyterlab_url is not None: + result['JupyterlabUrl'] = self.jupyterlab_url + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.latest_snapshot is not None: + result['LatestSnapshot'] = self.latest_snapshot.to_map() + if self.message is not None: + result['Message'] = self.message + if self.node_error_recovery is not None: + 
result['NodeErrorRecovery'] = self.node_error_recovery.to_map() + if self.payment_type is not None: + result['PaymentType'] = self.payment_type + if self.priority is not None: + result['Priority'] = self.priority + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.requested_resource is not None: + result['RequestedResource'] = self.requested_resource.to_map() + if self.resource_id is not None: + result['ResourceId'] = self.resource_id + if self.resource_name is not None: + result['ResourceName'] = self.resource_name + if self.status is not None: + result['Status'] = self.status + if self.success is not None: + result['Success'] = self.success + if self.terminal_url is not None: + result['TerminalUrl'] = self.terminal_url + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_name is not None: + result['UserName'] = self.user_name + if self.user_vpc is not None: + result['UserVpc'] = self.user_vpc.to_map() + if self.web_ideurl is not None: + result['WebIDEUrl'] = self.web_ideurl + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + if self.workspace_name is not None: + result['WorkspaceName'] = self.workspace_name + if self.workspace_source is not None: + result['WorkspaceSource'] = self.workspace_source + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AcceleratorType') is not None: + self.accelerator_type = m.get('AcceleratorType') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('AccumulatedRunningTimeInMs') is not None: + self.accumulated_running_time_in_ms = m.get('AccumulatedRunningTimeInMs') + self.cloud_disks = [] + if m.get('CloudDisks') is not None: + for k in m.get('CloudDisks'): + temp_model = 
GetInstanceResponseBodyCloudDisks() + self.cloud_disks.append(temp_model.from_map(k)) + if m.get('Code') is not None: + self.code = m.get('Code') + self.datasets = [] + if m.get('Datasets') is not None: + for k in m.get('Datasets'): + temp_model = GetInstanceResponseBodyDatasets() + self.datasets.append(temp_model.from_map(k)) + if m.get('Driver') is not None: + self.driver = m.get('Driver') + if m.get('EcsSpec') is not None: + self.ecs_spec = m.get('EcsSpec') + if m.get('EnvironmentVariables') is not None: + self.environment_variables = m.get('EnvironmentVariables') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('IdleInstanceCuller') is not None: + temp_model = GetInstanceResponseBodyIdleInstanceCuller() + self.idle_instance_culler = temp_model.from_map(m['IdleInstanceCuller']) + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageName') is not None: + self.image_name = m.get('ImageName') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('InstanceName') is not None: + self.instance_name = m.get('InstanceName') + if m.get('InstanceShutdownTimer') is not None: + temp_model = GetInstanceResponseBodyInstanceShutdownTimer() + self.instance_shutdown_timer = temp_model.from_map(m['InstanceShutdownTimer']) + self.instance_snapshot_list = [] + if m.get('InstanceSnapshotList') is not None: + for k in m.get('InstanceSnapshotList'): + temp_model = GetInstanceResponseBodyInstanceSnapshotList() + self.instance_snapshot_list.append(temp_model.from_map(k)) + if m.get('InstanceUrl') is not None: + self.instance_url = m.get('InstanceUrl') + if m.get('JupyterlabUrl') is not None: + self.jupyterlab_url = 
m.get('JupyterlabUrl') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = GetInstanceResponseBodyLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('LatestSnapshot') is not None: + temp_model = GetInstanceResponseBodyLatestSnapshot() + self.latest_snapshot = temp_model.from_map(m['LatestSnapshot']) + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('NodeErrorRecovery') is not None: + temp_model = GetInstanceResponseBodyNodeErrorRecovery() + self.node_error_recovery = temp_model.from_map(m['NodeErrorRecovery']) + if m.get('PaymentType') is not None: + self.payment_type = m.get('PaymentType') + if m.get('Priority') is not None: + self.priority = m.get('Priority') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('RequestedResource') is not None: + temp_model = GetInstanceResponseBodyRequestedResource() + self.requested_resource = temp_model.from_map(m['RequestedResource']) + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + if m.get('ResourceName') is not None: + self.resource_name = m.get('ResourceName') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('Success') is not None: + self.success = m.get('Success') + if m.get('TerminalUrl') is not None: + self.terminal_url = m.get('TerminalUrl') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') + if m.get('UserVpc') is not None: + temp_model = GetInstanceResponseBodyUserVpc() + self.user_vpc = temp_model.from_map(m['UserVpc']) + if m.get('WebIDEUrl') is not None: + self.web_ideurl = m.get('WebIDEUrl') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + if 
class GetInstanceResponse(TeaModel):
    """Transport-level envelope for GetInstance.

    Pairs the raw HTTP response headers and status code with the parsed
    ``GetInstanceResponseBody`` payload, following the Tea SDK convention.
    """

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetInstanceResponseBody = None,
    ):
        # Raw HTTP response headers.
        self.headers = headers
        # Numeric HTTP status code.
        self.status_code = status_code
        # Deserialized response payload.
        self.body = body

    def validate(self):
        # Only the nested body carries validation logic.
        if self.body:
            self.body.validate()

    def to_map(self):
        # Tea convention: a non-None base map short-circuits serialization.
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetInstanceResponseBody().from_map(m['body'])
        return self
class GetInstanceEventsResponseBody(TeaModel):
    """Payload of GetInstanceEvents.

    Carries request bookkeeping fields (code/message/request id/success)
    plus ``events``, a plain list of strings — no nested models.
    """

    # (attribute, wire key) pairs, kept in the original wire order.
    _FIELDS = (
        ('code', 'Code'),
        ('events', 'Events'),
        ('http_status_code', 'HttpStatusCode'),
        ('instance_id', 'InstanceId'),
        ('message', 'Message'),
        ('request_id', 'RequestId'),
        ('success', 'Success'),
    )

    def __init__(
        self,
        code: str = None,
        events: List[str] = None,
        http_status_code: int = None,
        instance_id: str = None,
        message: str = None,
        request_id: str = None,
        success: bool = None,
    ):
        self.code = code
        self.events = events
        self.http_status_code = http_status_code
        self.instance_id = instance_id
        self.message = message
        self.request_id = request_id
        self.success = success

    def validate(self):
        # No nested models: nothing to validate.
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        # Serialize only the fields that are set, under their wire keys.
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        # Copy only the keys actually present (non-None) in the source map.
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class GetInstanceMetricsRequest(TeaModel):
    """Query parameters for GetInstanceMetrics: a time window, the metric
    type to fetch, and the sampling step."""

    # (attribute, wire key) pairs, kept in the original wire order.
    _FIELDS = (
        ('end_time', 'EndTime'),
        ('metric_type', 'MetricType'),
        ('start_time', 'StartTime'),
        ('time_step', 'TimeStep'),
    )

    def __init__(
        self,
        end_time: str = None,
        metric_type: str = None,
        start_time: str = None,
        time_step: str = None,
    ):
        self.end_time = end_time
        self.metric_type = metric_type
        self.start_time = start_time
        self.time_step = time_step

    def validate(self):
        # All fields are plain scalars: nothing to validate.
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        # Serialize only the fields that are set, under their wire keys.
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class GetInstanceMetricsResponseBodyPodMetrics(TeaModel):
    """Per-pod metric series: a pod identifier plus its metric samples."""

    def __init__(
        self,
        metrics: List[GetInstanceMetricsResponseBodyPodMetricsMetrics] = None,
        pod_id: str = None,
    ):
        # Metric samples for this pod; each entry is a Metrics sub-model.
        self.metrics = metrics
        # Identifier of the pod the samples belong to.
        self.pod_id = pod_id

    def validate(self):
        # Delegate to each non-null sample.
        for sample in self.metrics or []:
            if sample:
                sample.validate()

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        # NOTE: 'Metrics' is always emitted (possibly empty) — this matches
        # the generated-code convention for lists of sub-models.
        result['Metrics'] = []
        if self.metrics is not None:
            for sample in self.metrics:
                result['Metrics'].append(sample.to_map() if sample else None)
        if self.pod_id is not None:
            result['PodId'] = self.pod_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        # Reset first so stale entries never survive a re-parse.
        self.metrics = []
        raw_samples = m.get('Metrics')
        if raw_samples is not None:
            for item in raw_samples:
                self.metrics.append(
                    GetInstanceMetricsResponseBodyPodMetricsMetrics().from_map(item)
                )
        if m.get('PodId') is not None:
            self.pod_id = m.get('PodId')
        return self
self.request_id = request_id + self.success = success + + def validate(self): + if self.pod_metrics: + for k in self.pod_metrics: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + result['PodMetrics'] = [] + if self.pod_metrics is not None: + for k in self.pod_metrics: + result['PodMetrics'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + self.pod_metrics = [] + if m.get('PodMetrics') is not None: + for k in m.get('PodMetrics'): + temp_model = GetInstanceMetricsResponseBodyPodMetrics() + self.pod_metrics.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class GetInstanceMetricsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetInstanceMetricsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = 
class GetInstanceShutdownTimerResponseBody(TeaModel):
    """Payload of GetInstanceShutdownTimer.

    All fields are plain scalars: timer timestamps, remaining time in
    milliseconds, and the usual request bookkeeping fields.
    """

    # (attribute, wire key) pairs, kept in the original wire order.
    _FIELDS = (
        ('code', 'Code'),
        ('due_time', 'DueTime'),
        ('gmt_create_time', 'GmtCreateTime'),
        ('gmt_modified_time', 'GmtModifiedTime'),
        ('http_status_code', 'HttpStatusCode'),
        ('instance_id', 'InstanceId'),
        ('message', 'Message'),
        ('remaining_time_in_ms', 'RemainingTimeInMs'),
        ('request_id', 'RequestId'),
        ('success', 'Success'),
    )

    def __init__(
        self,
        code: str = None,
        due_time: str = None,
        gmt_create_time: str = None,
        gmt_modified_time: str = None,
        http_status_code: int = None,
        instance_id: str = None,
        message: str = None,
        remaining_time_in_ms: int = None,
        request_id: str = None,
        success: bool = None,
    ):
        self.code = code
        self.due_time = due_time
        self.gmt_create_time = gmt_create_time
        self.gmt_modified_time = gmt_modified_time
        self.http_status_code = http_status_code
        self.instance_id = instance_id
        self.message = message
        self.remaining_time_in_ms = remaining_time_in_ms
        self.request_id = request_id
        self.success = success

    def validate(self):
        # No nested models: nothing to validate.
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        # Serialize only the fields that are set, under their wire keys.
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class GetInstanceSnapshotResponseBodyLabels(TeaModel):
    """A single key/value label attached to an instance snapshot."""

    def __init__(self, key: str = None, value: str = None):
        # Label key.
        self.key = key
        # Label value.
        self.value = value

    def validate(self):
        # Plain scalars only: nothing to validate.
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        if self.key is not None:
            result['Key'] = self.key
        if self.value is not None:
            result['Value'] = self.value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('Key') is not None:
            self.key = m.get('Key')
        if m.get('Value') is not None:
            self.value = m.get('Value')
        return self
if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.exclude_paths is not None: + result['ExcludePaths'] = self.exclude_paths + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.message is not None: + result['Message'] = self.message + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.snapshot_name is not None: + result['SnapshotName'] = self.snapshot_name + if self.status is not None: + result['Status'] = self.status + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('ExcludePaths') is not None: + self.exclude_paths = m.get('ExcludePaths') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageUrl') is 
class GetInstanceSnapshotResponse(TeaModel):
    """Transport-level envelope for GetInstanceSnapshot.

    Pairs the raw HTTP response headers and status code with the parsed
    ``GetInstanceSnapshotResponseBody`` payload.
    """

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetInstanceSnapshotResponseBody = None,
    ):
        # Raw HTTP response headers.
        self.headers = headers
        # Numeric HTTP status code.
        self.status_code = status_code
        # Deserialized response payload.
        self.body = body

    def validate(self):
        # Only the nested body carries validation logic.
        if self.body:
            self.body.validate()

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetInstanceSnapshotResponseBody().from_map(m['body'])
        return self
class GetLifecycleResponseBodyLifecycle(TeaModel):
    """One lifecycle entry: a status with its reason and creation time."""

    # (attribute, wire key) pairs. NOTE: this class's wire order is
    # Status/ReasonCode/ReasonMessage/GmtCreateTime (not alphabetical);
    # preserved exactly as generated.
    _FIELDS = (
        ('status', 'Status'),
        ('reason_code', 'ReasonCode'),
        ('reason_message', 'ReasonMessage'),
        ('gmt_create_time', 'GmtCreateTime'),
    )

    def __init__(
        self,
        status: str = None,
        reason_code: str = None,
        reason_message: str = None,
        gmt_create_time: str = None,
    ):
        self.status = status
        self.reason_code = reason_code
        self.reason_message = reason_message
        self.gmt_create_time = gmt_create_time

    def validate(self):
        # Plain scalars only: nothing to validate.
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        # Serialize only the fields that are set, under their wire keys.
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class GetLifecycleResponse(TeaModel):
    """Transport-level envelope for GetLifecycle.

    Pairs the raw HTTP response headers and status code with the parsed
    ``GetLifecycleResponseBody`` payload.
    """

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetLifecycleResponseBody = None,
    ):
        # Raw HTTP response headers.
        self.headers = headers
        # Numeric HTTP status code.
        self.status_code = status_code
        # Deserialized response payload.
        self.body = body

    def validate(self):
        # Only the nested body carries validation logic.
        if self.body:
            self.body.validate()

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetLifecycleResponseBody().from_map(m['body'])
        return self
class GetResourceGroupStatisticsResponseBody(TeaModel):
    """Payload of GetResourceGroupStatistics.

    ``statistics`` is an untyped nested dict (no sub-model is generated
    for it), so every field serializes as a plain value.
    """

    # (attribute, wire key) pairs, kept in the original wire order.
    _FIELDS = (
        ('code', 'Code'),
        ('http_status_code', 'HttpStatusCode'),
        ('message', 'Message'),
        ('request_id', 'RequestId'),
        ('statistics', 'Statistics'),
        ('success', 'Success'),
    )

    def __init__(
        self,
        code: str = None,
        http_status_code: int = None,
        message: str = None,
        request_id: str = None,
        statistics: Dict[str, dict] = None,
        success: bool = None,
    ):
        self.code = code
        self.http_status_code = http_status_code
        self.message = message
        self.request_id = request_id
        self.statistics = statistics
        self.success = success

    def validate(self):
        # No nested models: nothing to validate.
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        # Serialize only the fields that are set, under their wire keys.
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class GetTokenRequest(TeaModel):
    """Request parameters for GetToken: the target instance and the
    desired token lifetime."""

    # (attribute, wire key) pairs, kept in the original wire order.
    _FIELDS = (
        ('expire_time', 'ExpireTime'),
        ('instance_id', 'InstanceId'),
    )

    def __init__(self, expire_time: int = None, instance_id: str = None):
        # Requested token expiry (semantics defined by the service).
        self.expire_time = expire_time
        # Instance the token is issued for.
        self.instance_id = instance_id

    def validate(self):
        # Plain scalars only: nothing to validate.
        pass

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class GetTokenResponse(TeaModel):
    """Transport-level envelope for GetToken.

    Pairs the raw HTTP response headers and status code with the parsed
    ``GetTokenResponseBody`` payload.
    """

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetTokenResponseBody = None,
    ):
        # Raw HTTP response headers.
        self.headers = headers
        # Numeric HTTP status code.
        self.status_code = status_code
        # Deserialized response payload.
        self.body = body

    def validate(self):
        # Only the nested body carries validation logic.
        if self.body:
            self.body.validate()

    def to_map(self):
        base = super().to_map()
        if base is not None:
            return base

        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetTokenResponseBody().from_map(m['body'])
        return self
float = None, + init_show_unit: str = None, + init_show_value: str = None, + is_free_tier_user: bool = None, + period_base_unit: str = None, + period_base_value: float = None, + period_show_unit: str = None, + period_show_value: str = None, + start_time: str = None, + status: str = None, + ): + self.end_time = end_time + self.init_base_unit = init_base_unit + self.init_base_value = init_base_value + self.init_show_unit = init_show_unit + self.init_show_value = init_show_value + self.is_free_tier_user = is_free_tier_user + self.period_base_unit = period_base_unit + self.period_base_value = period_base_value + self.period_show_unit = period_show_unit + self.period_show_value = period_show_value + self.start_time = start_time + self.status = status + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.init_base_unit is not None: + result['InitBaseUnit'] = self.init_base_unit + if self.init_base_value is not None: + result['InitBaseValue'] = self.init_base_value + if self.init_show_unit is not None: + result['InitShowUnit'] = self.init_show_unit + if self.init_show_value is not None: + result['InitShowValue'] = self.init_show_value + if self.is_free_tier_user is not None: + result['IsFreeTierUser'] = self.is_free_tier_user + if self.period_base_unit is not None: + result['PeriodBaseUnit'] = self.period_base_unit + if self.period_base_value is not None: + result['PeriodBaseValue'] = self.period_base_value + if self.period_show_unit is not None: + result['PeriodShowUnit'] = self.period_show_unit + if self.period_show_value is not None: + result['PeriodShowValue'] = self.period_show_value + if self.start_time is not None: + result['StartTime'] = self.start_time + if self.status is not None: + result['Status'] = self.status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if 
m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('InitBaseUnit') is not None: + self.init_base_unit = m.get('InitBaseUnit') + if m.get('InitBaseValue') is not None: + self.init_base_value = m.get('InitBaseValue') + if m.get('InitShowUnit') is not None: + self.init_show_unit = m.get('InitShowUnit') + if m.get('InitShowValue') is not None: + self.init_show_value = m.get('InitShowValue') + if m.get('IsFreeTierUser') is not None: + self.is_free_tier_user = m.get('IsFreeTierUser') + if m.get('PeriodBaseUnit') is not None: + self.period_base_unit = m.get('PeriodBaseUnit') + if m.get('PeriodBaseValue') is not None: + self.period_base_value = m.get('PeriodBaseValue') + if m.get('PeriodShowUnit') is not None: + self.period_show_unit = m.get('PeriodShowUnit') + if m.get('PeriodShowValue') is not None: + self.period_show_value = m.get('PeriodShowValue') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') + if m.get('Status') is not None: + self.status = m.get('Status') + return self + + +class GetUserConfigResponseBody(TeaModel): + def __init__( + self, + account_sufficient: bool = None, + code: str = None, + enable_eci_disk: bool = None, + free_tier: GetUserConfigResponseBodyFreeTier = None, + free_tier_spec_available: bool = None, + http_status_code: int = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.account_sufficient = account_sufficient + self.code = code + self.enable_eci_disk = enable_eci_disk + self.free_tier = free_tier + self.free_tier_spec_available = free_tier_spec_available + self.http_status_code = http_status_code + self.message = message + self.request_id = request_id + self.success = success + + def validate(self): + if self.free_tier: + self.free_tier.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.account_sufficient is not None: + result['AccountSufficient'] = self.account_sufficient + 
if self.code is not None: + result['Code'] = self.code + if self.enable_eci_disk is not None: + result['EnableEciDisk'] = self.enable_eci_disk + if self.free_tier is not None: + result['FreeTier'] = self.free_tier.to_map() + if self.free_tier_spec_available is not None: + result['FreeTierSpecAvailable'] = self.free_tier_spec_available + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AccountSufficient') is not None: + self.account_sufficient = m.get('AccountSufficient') + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('EnableEciDisk') is not None: + self.enable_eci_disk = m.get('EnableEciDisk') + if m.get('FreeTier') is not None: + temp_model = GetUserConfigResponseBodyFreeTier() + self.free_tier = temp_model.from_map(m['FreeTier']) + if m.get('FreeTierSpecAvailable') is not None: + self.free_tier_spec_available = m.get('FreeTierSpecAvailable') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class GetUserConfigResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetUserConfigResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + 
result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetUserConfigResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListEcsSpecsRequest(TeaModel): + def __init__( + self, + accelerator_type: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + ): + self.accelerator_type = accelerator_type + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accelerator_type is not None: + result['AcceleratorType'] = self.accelerator_type + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AcceleratorType') is not None: + self.accelerator_type = m.get('AcceleratorType') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + return self + + +class ListEcsSpecsResponseBodyEcsSpecsLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + 
self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class ListEcsSpecsResponseBodyEcsSpecs(TeaModel): + def __init__( + self, + accelerator_type: str = None, + cpu: int = None, + currency: str = None, + gpu: int = None, + gputype: str = None, + instance_bandwidth_rx: int = None, + instance_type: str = None, + is_available: bool = None, + labels: List[ListEcsSpecsResponseBodyEcsSpecsLabels] = None, + memory: float = None, + price: float = None, + system_disk_capacity: int = None, + ): + self.accelerator_type = accelerator_type + self.cpu = cpu + self.currency = currency + self.gpu = gpu + self.gputype = gputype + self.instance_bandwidth_rx = instance_bandwidth_rx + self.instance_type = instance_type + self.is_available = is_available + self.labels = labels + self.memory = memory + self.price = price + self.system_disk_capacity = system_disk_capacity + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accelerator_type is not None: + result['AcceleratorType'] = self.accelerator_type + if self.cpu is not None: + result['CPU'] = self.cpu + if self.currency is not None: + result['Currency'] = self.currency + if self.gpu is not None: + result['GPU'] = self.gpu + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.instance_bandwidth_rx is not None: + result['InstanceBandwidthRx'] = self.instance_bandwidth_rx + if self.instance_type is not None: + result['InstanceType'] = 
self.instance_type + if self.is_available is not None: + result['IsAvailable'] = self.is_available + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.memory is not None: + result['Memory'] = self.memory + if self.price is not None: + result['Price'] = self.price + if self.system_disk_capacity is not None: + result['SystemDiskCapacity'] = self.system_disk_capacity + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AcceleratorType') is not None: + self.accelerator_type = m.get('AcceleratorType') + if m.get('CPU') is not None: + self.cpu = m.get('CPU') + if m.get('Currency') is not None: + self.currency = m.get('Currency') + if m.get('GPU') is not None: + self.gpu = m.get('GPU') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('InstanceBandwidthRx') is not None: + self.instance_bandwidth_rx = m.get('InstanceBandwidthRx') + if m.get('InstanceType') is not None: + self.instance_type = m.get('InstanceType') + if m.get('IsAvailable') is not None: + self.is_available = m.get('IsAvailable') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = ListEcsSpecsResponseBodyEcsSpecsLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('Memory') is not None: + self.memory = m.get('Memory') + if m.get('Price') is not None: + self.price = m.get('Price') + if m.get('SystemDiskCapacity') is not None: + self.system_disk_capacity = m.get('SystemDiskCapacity') + return self + + +class ListEcsSpecsResponseBody(TeaModel): + def __init__( + self, + code: str = None, + ecs_specs: List[ListEcsSpecsResponseBodyEcsSpecs] = None, + http_status_code: int = None, + message: str = None, + request_id: str = None, + success: bool = None, + total_count: int = None, + ): + self.code = code + self.ecs_specs = ecs_specs + self.http_status_code = http_status_code + self.message = message + 
self.request_id = request_id + self.success = success + self.total_count = total_count + + def validate(self): + if self.ecs_specs: + for k in self.ecs_specs: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + result['EcsSpecs'] = [] + if self.ecs_specs is not None: + for k in self.ecs_specs: + result['EcsSpecs'].append(k.to_map() if k else None) + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + if self.total_count is not None: + result['TotalCount'] = self.total_count + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + self.ecs_specs = [] + if m.get('EcsSpecs') is not None: + for k in m.get('EcsSpecs'): + temp_model = ListEcsSpecsResponseBodyEcsSpecs() + self.ecs_specs.append(temp_model.from_map(k)) + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + return self + + +class ListEcsSpecsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListEcsSpecsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + 
if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListEcsSpecsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListInstanceSnapshotRequest(TeaModel): + def __init__( + self, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + ): + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + return self + + +class ListInstanceSnapshotResponseBodySnapshotsLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if 
self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class ListInstanceSnapshotResponseBodySnapshots(TeaModel): + def __init__( + self, + exclude_paths: List[str] = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + image_id: str = None, + image_url: str = None, + instance_id: str = None, + labels: List[ListInstanceSnapshotResponseBodySnapshotsLabels] = None, + reason_code: str = None, + reason_message: str = None, + snapshot_id: str = None, + snapshot_name: str = None, + status: str = None, + ): + self.exclude_paths = exclude_paths + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.image_id = image_id + self.image_url = image_url + self.instance_id = instance_id + self.labels = labels + self.reason_code = reason_code + self.reason_message = reason_message + self.snapshot_id = snapshot_id + self.snapshot_name = snapshot_name + self.status = status + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.exclude_paths is not None: + result['ExcludePaths'] = self.exclude_paths + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.reason_code is not None: + 
result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.snapshot_name is not None: + result['SnapshotName'] = self.snapshot_name + if self.status is not None: + result['Status'] = self.status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ExcludePaths') is not None: + self.exclude_paths = m.get('ExcludePaths') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = ListInstanceSnapshotResponseBodySnapshotsLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('SnapshotId') is not None: + self.snapshot_id = m.get('SnapshotId') + if m.get('SnapshotName') is not None: + self.snapshot_name = m.get('SnapshotName') + if m.get('Status') is not None: + self.status = m.get('Status') + return self + + +class ListInstanceSnapshotResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + message: str = None, + request_id: str = None, + snapshots: List[ListInstanceSnapshotResponseBodySnapshots] = None, + success: bool = None, + total_count: int = None, + ): + self.code = code + self.http_status_code = http_status_code + self.message = message + self.request_id = request_id + self.snapshots = snapshots + self.success = success + 
self.total_count = total_count + + def validate(self): + if self.snapshots: + for k in self.snapshots: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + result['Snapshots'] = [] + if self.snapshots is not None: + for k in self.snapshots: + result['Snapshots'].append(k.to_map() if k else None) + if self.success is not None: + result['Success'] = self.success + if self.total_count is not None: + result['TotalCount'] = self.total_count + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + self.snapshots = [] + if m.get('Snapshots') is not None: + for k in m.get('Snapshots'): + temp_model = ListInstanceSnapshotResponseBodySnapshots() + self.snapshots.append(temp_model.from_map(k)) + if m.get('Success') is not None: + self.success = m.get('Success') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + return self + + +class ListInstanceSnapshotResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListInstanceSnapshotResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not 
None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListInstanceSnapshotResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListInstanceStatisticsRequest(TeaModel): + def __init__( + self, + workspace_ids: str = None, + ): + self.workspace_ids = workspace_ids + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.workspace_ids is not None: + result['WorkspaceIds'] = self.workspace_ids + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('WorkspaceIds') is not None: + self.workspace_ids = m.get('WorkspaceIds') + return self + + +class ListInstanceStatisticsResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + message: str = None, + request_id: str = None, + statistics: Dict[str, dict] = None, + success: bool = None, + ): + self.code = code + self.http_status_code = http_status_code + self.message = message + self.request_id = request_id + self.statistics = statistics + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.statistics is not None: + result['Statistics'] = 
self.statistics + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Statistics') is not None: + self.statistics = m.get('Statistics') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class ListInstanceStatisticsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListInstanceStatisticsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListInstanceStatisticsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListInstancesRequest(TeaModel): + def __init__( + self, + accelerator_type: str = None, + accessibility: str = None, + instance_id: str = None, + instance_name: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + payment_type: str = None, + resource_id: str = None, + sort_by: str = None, + status: str = None, + workspace_id: 
str = None, + ): + self.accelerator_type = accelerator_type + self.accessibility = accessibility + self.instance_id = instance_id + self.instance_name = instance_name + self.order = order + self.page_number = page_number + self.page_size = page_size + self.payment_type = payment_type + self.resource_id = resource_id + self.sort_by = sort_by + self.status = status + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accelerator_type is not None: + result['AcceleratorType'] = self.accelerator_type + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.instance_name is not None: + result['InstanceName'] = self.instance_name + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.payment_type is not None: + result['PaymentType'] = self.payment_type + if self.resource_id is not None: + result['ResourceId'] = self.resource_id + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.status is not None: + result['Status'] = self.status + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AcceleratorType') is not None: + self.accelerator_type = m.get('AcceleratorType') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('InstanceName') is not None: + self.instance_name = m.get('InstanceName') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = 
m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('PaymentType') is not None: + self.payment_type = m.get('PaymentType') + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class ListInstancesResponseBodyInstancesCloudDisks(TeaModel): + def __init__( + self, + capacity: str = None, + mount_path: str = None, + path: str = None, + sub_type: str = None, + ): + self.capacity = capacity + self.mount_path = mount_path + self.path = path + self.sub_type = sub_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.capacity is not None: + result['Capacity'] = self.capacity + if self.mount_path is not None: + result['MountPath'] = self.mount_path + if self.path is not None: + result['Path'] = self.path + if self.sub_type is not None: + result['SubType'] = self.sub_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Capacity') is not None: + self.capacity = m.get('Capacity') + if m.get('MountPath') is not None: + self.mount_path = m.get('MountPath') + if m.get('Path') is not None: + self.path = m.get('Path') + if m.get('SubType') is not None: + self.sub_type = m.get('SubType') + return self + + +class ListInstancesResponseBodyInstancesDatasets(TeaModel): + def __init__( + self, + dataset_id: str = None, + mount_path: str = None, + ): + self.dataset_id = dataset_id + self.mount_path = mount_path + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.mount_path is not None: 
+ result['MountPath'] = self.mount_path + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('MountPath') is not None: + self.mount_path = m.get('MountPath') + return self + + +class ListInstancesResponseBodyInstancesIdleInstanceCuller(TeaModel): + def __init__( + self, + cpu_percent_threshold: int = None, + gpu_percent_threshold: int = None, + idle_time_in_minutes: int = None, + instance_id: str = None, + max_idle_time_in_minutes: int = None, + ): + self.cpu_percent_threshold = cpu_percent_threshold + self.gpu_percent_threshold = gpu_percent_threshold + self.idle_time_in_minutes = idle_time_in_minutes + self.instance_id = instance_id + self.max_idle_time_in_minutes = max_idle_time_in_minutes + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.cpu_percent_threshold is not None: + result['CpuPercentThreshold'] = self.cpu_percent_threshold + if self.gpu_percent_threshold is not None: + result['GpuPercentThreshold'] = self.gpu_percent_threshold + if self.idle_time_in_minutes is not None: + result['IdleTimeInMinutes'] = self.idle_time_in_minutes + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.max_idle_time_in_minutes is not None: + result['MaxIdleTimeInMinutes'] = self.max_idle_time_in_minutes + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CpuPercentThreshold') is not None: + self.cpu_percent_threshold = m.get('CpuPercentThreshold') + if m.get('GpuPercentThreshold') is not None: + self.gpu_percent_threshold = m.get('GpuPercentThreshold') + if m.get('IdleTimeInMinutes') is not None: + self.idle_time_in_minutes = m.get('IdleTimeInMinutes') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('MaxIdleTimeInMinutes') is not None: + self.max_idle_time_in_minutes 
= m.get('MaxIdleTimeInMinutes') + return self + + +class ListInstancesResponseBodyInstancesInstanceShutdownTimer(TeaModel): + def __init__( + self, + due_time: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + instance_id: str = None, + remaining_time_in_ms: int = None, + ): + self.due_time = due_time + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.instance_id = instance_id + self.remaining_time_in_ms = remaining_time_in_ms + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.due_time is not None: + result['DueTime'] = self.due_time + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.remaining_time_in_ms is not None: + result['RemainingTimeInMs'] = self.remaining_time_in_ms + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DueTime') is not None: + self.due_time = m.get('DueTime') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('RemainingTimeInMs') is not None: + self.remaining_time_in_ms = m.get('RemainingTimeInMs') + return self + + +class ListInstancesResponseBodyInstancesInstanceSnapshotList(TeaModel): + def __init__( + self, + gmt_create_time: str = None, + gmt_modified_time: str = None, + image_id: str = None, + image_name: str = None, + image_url: str = None, + reason_code: str = None, + reason_message: str = None, + repository_url: str = None, + status: str = None, + ): + self.gmt_create_time = gmt_create_time + 
self.gmt_modified_time = gmt_modified_time + self.image_id = image_id + self.image_name = image_name + self.image_url = image_url + self.reason_code = reason_code + self.reason_message = reason_message + self.repository_url = repository_url + self.status = status + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_name is not None: + result['ImageName'] = self.image_name + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.repository_url is not None: + result['RepositoryUrl'] = self.repository_url + if self.status is not None: + result['Status'] = self.status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageName') is not None: + self.image_name = m.get('ImageName') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('RepositoryUrl') is not None: + self.repository_url = m.get('RepositoryUrl') + if m.get('Status') is not None: + self.status = m.get('Status') + return self + + +class ListInstancesResponseBodyInstancesLabels(TeaModel): + def __init__( + 
self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class ListInstancesResponseBodyInstancesLatestSnapshot(TeaModel): + def __init__( + self, + gmt_create_time: str = None, + gmt_modified_time: str = None, + image_id: str = None, + image_name: str = None, + image_url: str = None, + reason_code: str = None, + reason_message: str = None, + repository_url: str = None, + status: str = None, + ): + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.image_id = image_id + self.image_name = image_name + self.image_url = image_url + self.reason_code = reason_code + self.reason_message = reason_message + self.repository_url = repository_url + self.status = status + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_name is not None: + result['ImageName'] = self.image_name + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.repository_url is not None: + result['RepositoryUrl'] = self.repository_url + if self.status 
is not None: + result['Status'] = self.status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageName') is not None: + self.image_name = m.get('ImageName') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('RepositoryUrl') is not None: + self.repository_url = m.get('RepositoryUrl') + if m.get('Status') is not None: + self.status = m.get('Status') + return self + + +class ListInstancesResponseBodyInstancesRequestedResource(TeaModel): + def __init__( + self, + cpu: str = None, + gpu: str = None, + gputype: str = None, + memory: str = None, + shared_memory: str = None, + ): + self.cpu = cpu + self.gpu = gpu + self.gputype = gputype + self.memory = memory + self.shared_memory = shared_memory + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.cpu is not None: + result['CPU'] = self.cpu + if self.gpu is not None: + result['GPU'] = self.gpu + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.memory is not None: + result['Memory'] = self.memory + if self.shared_memory is not None: + result['SharedMemory'] = self.shared_memory + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CPU') is not None: + self.cpu = m.get('CPU') + if m.get('GPU') is not None: + self.gpu = m.get('GPU') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('Memory') is not None: + self.memory = m.get('Memory') + if m.get('SharedMemory') 
is not None: + self.shared_memory = m.get('SharedMemory') + return self + + +class ListInstancesResponseBodyInstancesUserVpc(TeaModel): + def __init__( + self, + default_route: str = None, + extended_cidrs: List[str] = None, + forward_infos: List[ForwardInfoResponse] = None, + security_group_id: str = None, + v_switch_id: str = None, + vpc_id: str = None, + ): + self.default_route = default_route + self.extended_cidrs = extended_cidrs + self.forward_infos = forward_infos + self.security_group_id = security_group_id + self.v_switch_id = v_switch_id + self.vpc_id = vpc_id + + def validate(self): + if self.forward_infos: + for k in self.forward_infos: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.default_route is not None: + result['DefaultRoute'] = self.default_route + if self.extended_cidrs is not None: + result['ExtendedCIDRs'] = self.extended_cidrs + result['ForwardInfos'] = [] + if self.forward_infos is not None: + for k in self.forward_infos: + result['ForwardInfos'].append(k.to_map() if k else None) + if self.security_group_id is not None: + result['SecurityGroupId'] = self.security_group_id + if self.v_switch_id is not None: + result['VSwitchId'] = self.v_switch_id + if self.vpc_id is not None: + result['VpcId'] = self.vpc_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DefaultRoute') is not None: + self.default_route = m.get('DefaultRoute') + if m.get('ExtendedCIDRs') is not None: + self.extended_cidrs = m.get('ExtendedCIDRs') + self.forward_infos = [] + if m.get('ForwardInfos') is not None: + for k in m.get('ForwardInfos'): + temp_model = ForwardInfoResponse() + self.forward_infos.append(temp_model.from_map(k)) + if m.get('SecurityGroupId') is not None: + self.security_group_id = m.get('SecurityGroupId') + if m.get('VSwitchId') is not None: + self.v_switch_id = m.get('VSwitchId') + if m.get('VpcId') is not None: + self.vpc_id = 
m.get('VpcId') + return self + + +class ListInstancesResponseBodyInstances(TeaModel): + def __init__( + self, + accelerator_type: str = None, + accessibility: str = None, + accumulated_running_time_in_ms: int = None, + cloud_disks: List[ListInstancesResponseBodyInstancesCloudDisks] = None, + datasets: List[ListInstancesResponseBodyInstancesDatasets] = None, + driver: str = None, + ecs_spec: str = None, + environment_variables: Dict[str, str] = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + idle_instance_culler: ListInstancesResponseBodyInstancesIdleInstanceCuller = None, + image_id: str = None, + image_name: str = None, + image_url: str = None, + instance_id: str = None, + instance_name: str = None, + instance_shutdown_timer: ListInstancesResponseBodyInstancesInstanceShutdownTimer = None, + instance_snapshot_list: List[ListInstancesResponseBodyInstancesInstanceSnapshotList] = None, + instance_url: str = None, + jupyterlab_url: str = None, + labels: List[ListInstancesResponseBodyInstancesLabels] = None, + latest_snapshot: ListInstancesResponseBodyInstancesLatestSnapshot = None, + payment_type: str = None, + priority: int = None, + reason_code: str = None, + reason_message: str = None, + requested_resource: ListInstancesResponseBodyInstancesRequestedResource = None, + resource_id: str = None, + resource_name: str = None, + status: str = None, + terminal_url: str = None, + user_id: str = None, + user_name: str = None, + user_vpc: ListInstancesResponseBodyInstancesUserVpc = None, + web_ideurl: str = None, + workspace_id: str = None, + workspace_name: str = None, + workspace_source: str = None, + ): + self.accelerator_type = accelerator_type + self.accessibility = accessibility + self.accumulated_running_time_in_ms = accumulated_running_time_in_ms + self.cloud_disks = cloud_disks + self.datasets = datasets + self.driver = driver + self.ecs_spec = ecs_spec + self.environment_variables = environment_variables + self.gmt_create_time = 
gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.idle_instance_culler = idle_instance_culler + self.image_id = image_id + self.image_name = image_name + self.image_url = image_url + self.instance_id = instance_id + self.instance_name = instance_name + self.instance_shutdown_timer = instance_shutdown_timer + self.instance_snapshot_list = instance_snapshot_list + self.instance_url = instance_url + # Jupyterlab Url。 + self.jupyterlab_url = jupyterlab_url + self.labels = labels + self.latest_snapshot = latest_snapshot + self.payment_type = payment_type + self.priority = priority + self.reason_code = reason_code + self.reason_message = reason_message + self.requested_resource = requested_resource + self.resource_id = resource_id + self.resource_name = resource_name + self.status = status + self.terminal_url = terminal_url + self.user_id = user_id + self.user_name = user_name + self.user_vpc = user_vpc + # Web IDE url。 + self.web_ideurl = web_ideurl + self.workspace_id = workspace_id + self.workspace_name = workspace_name + self.workspace_source = workspace_source + + def validate(self): + if self.cloud_disks: + for k in self.cloud_disks: + if k: + k.validate() + if self.datasets: + for k in self.datasets: + if k: + k.validate() + if self.idle_instance_culler: + self.idle_instance_culler.validate() + if self.instance_shutdown_timer: + self.instance_shutdown_timer.validate() + if self.instance_snapshot_list: + for k in self.instance_snapshot_list: + if k: + k.validate() + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.latest_snapshot: + self.latest_snapshot.validate() + if self.requested_resource: + self.requested_resource.validate() + if self.user_vpc: + self.user_vpc.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accelerator_type is not None: + result['AcceleratorType'] = self.accelerator_type + if self.accessibility is not None: + 
result['Accessibility'] = self.accessibility + if self.accumulated_running_time_in_ms is not None: + result['AccumulatedRunningTimeInMs'] = self.accumulated_running_time_in_ms + result['CloudDisks'] = [] + if self.cloud_disks is not None: + for k in self.cloud_disks: + result['CloudDisks'].append(k.to_map() if k else None) + result['Datasets'] = [] + if self.datasets is not None: + for k in self.datasets: + result['Datasets'].append(k.to_map() if k else None) + if self.driver is not None: + result['Driver'] = self.driver + if self.ecs_spec is not None: + result['EcsSpec'] = self.ecs_spec + if self.environment_variables is not None: + result['EnvironmentVariables'] = self.environment_variables + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.idle_instance_culler is not None: + result['IdleInstanceCuller'] = self.idle_instance_culler.to_map() + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_name is not None: + result['ImageName'] = self.image_name + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.instance_name is not None: + result['InstanceName'] = self.instance_name + if self.instance_shutdown_timer is not None: + result['InstanceShutdownTimer'] = self.instance_shutdown_timer.to_map() + result['InstanceSnapshotList'] = [] + if self.instance_snapshot_list is not None: + for k in self.instance_snapshot_list: + result['InstanceSnapshotList'].append(k.to_map() if k else None) + if self.instance_url is not None: + result['InstanceUrl'] = self.instance_url + if self.jupyterlab_url is not None: + result['JupyterlabUrl'] = self.jupyterlab_url + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if 
self.latest_snapshot is not None: + result['LatestSnapshot'] = self.latest_snapshot.to_map() + if self.payment_type is not None: + result['PaymentType'] = self.payment_type + if self.priority is not None: + result['Priority'] = self.priority + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.requested_resource is not None: + result['RequestedResource'] = self.requested_resource.to_map() + if self.resource_id is not None: + result['ResourceId'] = self.resource_id + if self.resource_name is not None: + result['ResourceName'] = self.resource_name + if self.status is not None: + result['Status'] = self.status + if self.terminal_url is not None: + result['TerminalUrl'] = self.terminal_url + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_name is not None: + result['UserName'] = self.user_name + if self.user_vpc is not None: + result['UserVpc'] = self.user_vpc.to_map() + if self.web_ideurl is not None: + result['WebIDEUrl'] = self.web_ideurl + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + if self.workspace_name is not None: + result['WorkspaceName'] = self.workspace_name + if self.workspace_source is not None: + result['WorkspaceSource'] = self.workspace_source + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AcceleratorType') is not None: + self.accelerator_type = m.get('AcceleratorType') + if m.get('Accessibility') is not None: + self.accessibility = m.get('Accessibility') + if m.get('AccumulatedRunningTimeInMs') is not None: + self.accumulated_running_time_in_ms = m.get('AccumulatedRunningTimeInMs') + self.cloud_disks = [] + if m.get('CloudDisks') is not None: + for k in m.get('CloudDisks'): + temp_model = ListInstancesResponseBodyInstancesCloudDisks() + self.cloud_disks.append(temp_model.from_map(k)) + self.datasets = [] + if m.get('Datasets') is 
not None: + for k in m.get('Datasets'): + temp_model = ListInstancesResponseBodyInstancesDatasets() + self.datasets.append(temp_model.from_map(k)) + if m.get('Driver') is not None: + self.driver = m.get('Driver') + if m.get('EcsSpec') is not None: + self.ecs_spec = m.get('EcsSpec') + if m.get('EnvironmentVariables') is not None: + self.environment_variables = m.get('EnvironmentVariables') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('IdleInstanceCuller') is not None: + temp_model = ListInstancesResponseBodyInstancesIdleInstanceCuller() + self.idle_instance_culler = temp_model.from_map(m['IdleInstanceCuller']) + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageName') is not None: + self.image_name = m.get('ImageName') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('InstanceName') is not None: + self.instance_name = m.get('InstanceName') + if m.get('InstanceShutdownTimer') is not None: + temp_model = ListInstancesResponseBodyInstancesInstanceShutdownTimer() + self.instance_shutdown_timer = temp_model.from_map(m['InstanceShutdownTimer']) + self.instance_snapshot_list = [] + if m.get('InstanceSnapshotList') is not None: + for k in m.get('InstanceSnapshotList'): + temp_model = ListInstancesResponseBodyInstancesInstanceSnapshotList() + self.instance_snapshot_list.append(temp_model.from_map(k)) + if m.get('InstanceUrl') is not None: + self.instance_url = m.get('InstanceUrl') + if m.get('JupyterlabUrl') is not None: + self.jupyterlab_url = m.get('JupyterlabUrl') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = ListInstancesResponseBodyInstancesLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('LatestSnapshot') is 
not None: + temp_model = ListInstancesResponseBodyInstancesLatestSnapshot() + self.latest_snapshot = temp_model.from_map(m['LatestSnapshot']) + if m.get('PaymentType') is not None: + self.payment_type = m.get('PaymentType') + if m.get('Priority') is not None: + self.priority = m.get('Priority') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('RequestedResource') is not None: + temp_model = ListInstancesResponseBodyInstancesRequestedResource() + self.requested_resource = temp_model.from_map(m['RequestedResource']) + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + if m.get('ResourceName') is not None: + self.resource_name = m.get('ResourceName') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('TerminalUrl') is not None: + self.terminal_url = m.get('TerminalUrl') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') + if m.get('UserVpc') is not None: + temp_model = ListInstancesResponseBodyInstancesUserVpc() + self.user_vpc = temp_model.from_map(m['UserVpc']) + if m.get('WebIDEUrl') is not None: + self.web_ideurl = m.get('WebIDEUrl') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + if m.get('WorkspaceName') is not None: + self.workspace_name = m.get('WorkspaceName') + if m.get('WorkspaceSource') is not None: + self.workspace_source = m.get('WorkspaceSource') + return self + + +class ListInstancesResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instances: List[ListInstancesResponseBodyInstances] = None, + message: str = None, + request_id: str = None, + success: bool = None, + total_count: int = None, + ): + self.code = code + self.http_status_code = http_status_code + self.instances = instances + self.message = 
message + self.request_id = request_id + self.success = success + self.total_count = total_count + + def validate(self): + if self.instances: + for k in self.instances: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + result['Instances'] = [] + if self.instances is not None: + for k in self.instances: + result['Instances'].append(k.to_map() if k else None) + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + if self.total_count is not None: + result['TotalCount'] = self.total_count + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + self.instances = [] + if m.get('Instances') is not None: + for k in m.get('Instances'): + temp_model = ListInstancesResponseBodyInstances() + self.instances.append(temp_model.from_map(k)) + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + return self + + +class ListInstancesResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListInstancesResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + 
result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListInstancesResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class StartInstanceResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.code = code + self.http_status_code = http_status_code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = 
m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class StartInstanceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: StartInstanceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = StartInstanceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class StopInstanceRequest(TeaModel): + def __init__( + self, + save_image: bool = None, + ): + self.save_image = save_image + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.save_image is not None: + result['SaveImage'] = self.save_image + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('SaveImage') is not None: + self.save_image = m.get('SaveImage') + return self + + +class StopInstanceResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + self.code = code + self.http_status_code = http_status_code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.success = success + + 
def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class StopInstanceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: StopInstanceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = 
StopInstanceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class UpdateInstanceRequestCloudDisks(TeaModel): + def __init__( + self, + capacity: str = None, + sub_type: str = None, + ): + self.capacity = capacity + self.sub_type = sub_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.capacity is not None: + result['Capacity'] = self.capacity + if self.sub_type is not None: + result['SubType'] = self.sub_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Capacity') is not None: + self.capacity = m.get('Capacity') + if m.get('SubType') is not None: + self.sub_type = m.get('SubType') + return self + + +class UpdateInstanceRequestDatasets(TeaModel): + def __init__( + self, + dataset_id: str = None, + mount_path: str = None, + ): + self.dataset_id = dataset_id + self.mount_path = mount_path + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.mount_path is not None: + result['MountPath'] = self.mount_path + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('MountPath') is not None: + self.mount_path = m.get('MountPath') + return self + + +class UpdateInstanceRequestRequestedResource(TeaModel): + def __init__( + self, + cpu: str = None, + gpu: str = None, + gputype: str = None, + memory: str = None, + shared_memory: str = None, + ): + self.cpu = cpu + self.gpu = gpu + self.gputype = gputype + self.memory = memory + self.shared_memory = shared_memory + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.cpu is not None: + result['CPU'] = 
self.cpu + if self.gpu is not None: + result['GPU'] = self.gpu + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.memory is not None: + result['Memory'] = self.memory + if self.shared_memory is not None: + result['SharedMemory'] = self.shared_memory + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CPU') is not None: + self.cpu = m.get('CPU') + if m.get('GPU') is not None: + self.gpu = m.get('GPU') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('Memory') is not None: + self.memory = m.get('Memory') + if m.get('SharedMemory') is not None: + self.shared_memory = m.get('SharedMemory') + return self + + +class UpdateInstanceRequestUserVpc(TeaModel): + def __init__( + self, + default_route: str = None, + extended_cidrs: List[str] = None, + forward_infos: List[ForwardInfo] = None, + security_group_id: str = None, + v_switch_id: str = None, + vpc_id: str = None, + ): + self.default_route = default_route + self.extended_cidrs = extended_cidrs + self.forward_infos = forward_infos + self.security_group_id = security_group_id + self.v_switch_id = v_switch_id + self.vpc_id = vpc_id + + def validate(self): + if self.forward_infos: + for k in self.forward_infos: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.default_route is not None: + result['DefaultRoute'] = self.default_route + if self.extended_cidrs is not None: + result['ExtendedCIDRs'] = self.extended_cidrs + result['ForwardInfos'] = [] + if self.forward_infos is not None: + for k in self.forward_infos: + result['ForwardInfos'].append(k.to_map() if k else None) + if self.security_group_id is not None: + result['SecurityGroupId'] = self.security_group_id + if self.v_switch_id is not None: + result['VSwitchId'] = self.v_switch_id + if self.vpc_id is not None: + result['VpcId'] = self.vpc_id + return result + + def from_map(self, m: dict = 
None): + m = m or dict() + if m.get('DefaultRoute') is not None: + self.default_route = m.get('DefaultRoute') + if m.get('ExtendedCIDRs') is not None: + self.extended_cidrs = m.get('ExtendedCIDRs') + self.forward_infos = [] + if m.get('ForwardInfos') is not None: + for k in m.get('ForwardInfos'): + temp_model = ForwardInfo() + self.forward_infos.append(temp_model.from_map(k)) + if m.get('SecurityGroupId') is not None: + self.security_group_id = m.get('SecurityGroupId') + if m.get('VSwitchId') is not None: + self.v_switch_id = m.get('VSwitchId') + if m.get('VpcId') is not None: + self.vpc_id = m.get('VpcId') + return self + + +class UpdateInstanceRequest(TeaModel): + def __init__( + self, + accessibility: str = None, + cloud_disks: List[UpdateInstanceRequestCloudDisks] = None, + datasets: List[UpdateInstanceRequestDatasets] = None, + disassociate_datasets: bool = None, + disassociate_driver: bool = None, + disassociate_forward_infos: bool = None, + disassociate_vpc: bool = None, + driver: str = None, + ecs_spec: str = None, + image_id: str = None, + image_url: str = None, + instance_name: str = None, + priority: int = None, + requested_resource: UpdateInstanceRequestRequestedResource = None, + user_id: str = None, + user_vpc: UpdateInstanceRequestUserVpc = None, + workspace_source: str = None, + ): + self.accessibility = accessibility + self.cloud_disks = cloud_disks + self.datasets = datasets + self.disassociate_datasets = disassociate_datasets + self.disassociate_driver = disassociate_driver + self.disassociate_forward_infos = disassociate_forward_infos + self.disassociate_vpc = disassociate_vpc + self.driver = driver + self.ecs_spec = ecs_spec + self.image_id = image_id + self.image_url = image_url + self.instance_name = instance_name + self.priority = priority + self.requested_resource = requested_resource + self.user_id = user_id + self.user_vpc = user_vpc + self.workspace_source = workspace_source + + def validate(self): + if self.cloud_disks: + for k in 
self.cloud_disks: + if k: + k.validate() + if self.datasets: + for k in self.datasets: + if k: + k.validate() + if self.requested_resource: + self.requested_resource.validate() + if self.user_vpc: + self.user_vpc.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accessibility is not None: + result['Accessibility'] = self.accessibility + result['CloudDisks'] = [] + if self.cloud_disks is not None: + for k in self.cloud_disks: + result['CloudDisks'].append(k.to_map() if k else None) + result['Datasets'] = [] + if self.datasets is not None: + for k in self.datasets: + result['Datasets'].append(k.to_map() if k else None) + if self.disassociate_datasets is not None: + result['DisassociateDatasets'] = self.disassociate_datasets + if self.disassociate_driver is not None: + result['DisassociateDriver'] = self.disassociate_driver + if self.disassociate_forward_infos is not None: + result['DisassociateForwardInfos'] = self.disassociate_forward_infos + if self.disassociate_vpc is not None: + result['DisassociateVpc'] = self.disassociate_vpc + if self.driver is not None: + result['Driver'] = self.driver + if self.ecs_spec is not None: + result['EcsSpec'] = self.ecs_spec + if self.image_id is not None: + result['ImageId'] = self.image_id + if self.image_url is not None: + result['ImageUrl'] = self.image_url + if self.instance_name is not None: + result['InstanceName'] = self.instance_name + if self.priority is not None: + result['Priority'] = self.priority + if self.requested_resource is not None: + result['RequestedResource'] = self.requested_resource.to_map() + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_vpc is not None: + result['UserVpc'] = self.user_vpc.to_map() + if self.workspace_source is not None: + result['WorkspaceSource'] = self.workspace_source + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Accessibility') is not None: 
+ self.accessibility = m.get('Accessibility') + self.cloud_disks = [] + if m.get('CloudDisks') is not None: + for k in m.get('CloudDisks'): + temp_model = UpdateInstanceRequestCloudDisks() + self.cloud_disks.append(temp_model.from_map(k)) + self.datasets = [] + if m.get('Datasets') is not None: + for k in m.get('Datasets'): + temp_model = UpdateInstanceRequestDatasets() + self.datasets.append(temp_model.from_map(k)) + if m.get('DisassociateDatasets') is not None: + self.disassociate_datasets = m.get('DisassociateDatasets') + if m.get('DisassociateDriver') is not None: + self.disassociate_driver = m.get('DisassociateDriver') + if m.get('DisassociateForwardInfos') is not None: + self.disassociate_forward_infos = m.get('DisassociateForwardInfos') + if m.get('DisassociateVpc') is not None: + self.disassociate_vpc = m.get('DisassociateVpc') + if m.get('Driver') is not None: + self.driver = m.get('Driver') + if m.get('EcsSpec') is not None: + self.ecs_spec = m.get('EcsSpec') + if m.get('ImageId') is not None: + self.image_id = m.get('ImageId') + if m.get('ImageUrl') is not None: + self.image_url = m.get('ImageUrl') + if m.get('InstanceName') is not None: + self.instance_name = m.get('InstanceName') + if m.get('Priority') is not None: + self.priority = m.get('Priority') + if m.get('RequestedResource') is not None: + temp_model = UpdateInstanceRequestRequestedResource() + self.requested_resource = temp_model.from_map(m['RequestedResource']) + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserVpc') is not None: + temp_model = UpdateInstanceRequestUserVpc() + self.user_vpc = temp_model.from_map(m['UserVpc']) + if m.get('WorkspaceSource') is not None: + self.workspace_source = m.get('WorkspaceSource') + return self + + +class UpdateInstanceResponseBody(TeaModel): + def __init__( + self, + code: str = None, + http_status_code: int = None, + instance_id: str = None, + message: str = None, + request_id: str = None, + success: bool = None, + ): + 
self.code = code + self.http_status_code = http_status_code + self.instance_id = instance_id + self.message = message + self.request_id = request_id + self.success = success + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.http_status_code is not None: + result['HttpStatusCode'] = self.http_status_code + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.message is not None: + result['Message'] = self.message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.success is not None: + result['Success'] = self.success + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('HttpStatusCode') is not None: + self.http_status_code = m.get('HttpStatusCode') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Success') is not None: + self.success = m.get('Success') + return self + + +class UpdateInstanceResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: UpdateInstanceResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if 
m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = UpdateInstanceResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + diff --git a/pai/libs/alibabacloud_paistudio20220112/__init__.py b/pai/libs/alibabacloud_paistudio20220112/__init__.py index d706e92..d294ca8 100644 --- a/pai/libs/alibabacloud_paistudio20220112/__init__.py +++ b/pai/libs/alibabacloud_paistudio20220112/__init__.py @@ -1 +1 @@ -__version__ = '1.0.13' \ No newline at end of file +__version__ = '1.1.7' \ No newline at end of file diff --git a/pai/libs/alibabacloud_paistudio20220112/client.py b/pai/libs/alibabacloud_paistudio20220112/client.py index 5427831..63832fb 100644 --- a/pai/libs/alibabacloud_paistudio20220112/client.py +++ b/pai/libs/alibabacloud_paistudio20220112/client.py @@ -13,7 +13,6 @@ from pai.libs.alibabacloud_paistudio20220112 import models as pai_studio_20220112_models - class Client(OpenApiClient): """ *\ @@ -61,11 +60,130 @@ def get_endpoint( return endpoint_map.get(region_id) return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix) + def check_instance_web_terminal_with_options( + self, + training_job_id: str, + instance_id: str, + request: pai_studio_20220112_models.CheckInstanceWebTerminalRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.CheckInstanceWebTerminalResponse: + """ + @summary 检查WebTerminal + + @param request: CheckInstanceWebTerminalRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CheckInstanceWebTerminalResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.check_info): + body['CheckInfo'] = request.check_info + req = open_api_models.OpenApiRequest( + headers=headers, + 
body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CheckInstanceWebTerminal', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/webterminals/action/check', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.CheckInstanceWebTerminalResponse(), + self.call_api(params, req, runtime) + ) + + async def check_instance_web_terminal_with_options_async( + self, + training_job_id: str, + instance_id: str, + request: pai_studio_20220112_models.CheckInstanceWebTerminalRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.CheckInstanceWebTerminalResponse: + """ + @summary 检查WebTerminal + + @param request: CheckInstanceWebTerminalRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CheckInstanceWebTerminalResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.check_info): + body['CheckInfo'] = request.check_info + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CheckInstanceWebTerminal', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/webterminals/action/check', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.CheckInstanceWebTerminalResponse(), + await self.call_api_async(params, req, runtime) + ) + + def check_instance_web_terminal( + self, + training_job_id: str, + instance_id: str, + request: 
pai_studio_20220112_models.CheckInstanceWebTerminalRequest, + ) -> pai_studio_20220112_models.CheckInstanceWebTerminalResponse: + """ + @summary 检查WebTerminal + + @param request: CheckInstanceWebTerminalRequest + @return: CheckInstanceWebTerminalResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.check_instance_web_terminal_with_options(training_job_id, instance_id, request, headers, runtime) + + async def check_instance_web_terminal_async( + self, + training_job_id: str, + instance_id: str, + request: pai_studio_20220112_models.CheckInstanceWebTerminalRequest, + ) -> pai_studio_20220112_models.CheckInstanceWebTerminalResponse: + """ + @summary 检查WebTerminal + + @param request: CheckInstanceWebTerminalRequest + @return: CheckInstanceWebTerminalResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.check_instance_web_terminal_with_options_async(training_job_id, instance_id, request, headers, runtime) + def create_ai4ddefault_bucket_with_options( self, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateAI4DDefaultBucketResponse: + """ + @summary 创建AI4D模型桶 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateAI4DDefaultBucketResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -90,6 +208,13 @@ async def create_ai4ddefault_bucket_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateAI4DDefaultBucketResponse: + """ + @summary 创建AI4D模型桶 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateAI4DDefaultBucketResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -110,11 +235,21 @@ async def create_ai4ddefault_bucket_with_options_async( ) def create_ai4ddefault_bucket(self) -> pai_studio_20220112_models.CreateAI4DDefaultBucketResponse: + """ + 
@summary 创建AI4D模型桶 + + @return: CreateAI4DDefaultBucketResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_ai4ddefault_bucket_with_options(headers, runtime) async def create_ai4ddefault_bucket_async(self) -> pai_studio_20220112_models.CreateAI4DDefaultBucketResponse: + """ + @summary 创建AI4D模型桶 + + @return: CreateAI4DDefaultBucketResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_ai4ddefault_bucket_with_options_async(headers, runtime) @@ -125,6 +260,14 @@ def create_ai4dserivce_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateAI4DSerivceResponse: + """ + @summary 创建AI4D服务 + + @param request: CreateAI4DSerivceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateAI4DSerivceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.inference_spec): @@ -159,6 +302,14 @@ async def create_ai4dserivce_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateAI4DSerivceResponse: + """ + @summary 创建AI4D服务 + + @param request: CreateAI4DSerivceRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateAI4DSerivceResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.inference_spec): @@ -191,6 +342,12 @@ def create_ai4dserivce( self, request: pai_studio_20220112_models.CreateAI4DSerivceRequest, ) -> pai_studio_20220112_models.CreateAI4DSerivceResponse: + """ + @summary 创建AI4D服务 + + @param request: CreateAI4DSerivceRequest + @return: CreateAI4DSerivceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_ai4dserivce_with_options(request, headers, runtime) @@ -199,6 +356,12 @@ async def create_ai4dserivce_async( self, request: 
pai_studio_20220112_models.CreateAI4DSerivceRequest, ) -> pai_studio_20220112_models.CreateAI4DSerivceResponse: + """ + @summary 创建AI4D服务 + + @param request: CreateAI4DSerivceRequest + @return: CreateAI4DSerivceResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_ai4dserivce_with_options_async(request, headers, runtime) @@ -209,6 +372,14 @@ def create_algorithm_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateAlgorithmResponse: + """ + @summary 创建新的算法 + + @param request: CreateAlgorithmRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateAlgorithmResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.algorithm_description): @@ -245,6 +416,14 @@ async def create_algorithm_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateAlgorithmResponse: + """ + @summary 创建新的算法 + + @param request: CreateAlgorithmRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateAlgorithmResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.algorithm_description): @@ -279,6 +458,12 @@ def create_algorithm( self, request: pai_studio_20220112_models.CreateAlgorithmRequest, ) -> pai_studio_20220112_models.CreateAlgorithmResponse: + """ + @summary 创建新的算法 + + @param request: CreateAlgorithmRequest + @return: CreateAlgorithmResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_algorithm_with_options(request, headers, runtime) @@ -287,6 +472,12 @@ async def create_algorithm_async( self, request: pai_studio_20220112_models.CreateAlgorithmRequest, ) -> pai_studio_20220112_models.CreateAlgorithmResponse: + """ + @summary 创建新的算法 + + @param request: CreateAlgorithmRequest + @return: CreateAlgorithmResponse + 
""" runtime = util_models.RuntimeOptions() headers = {} return await self.create_algorithm_with_options_async(request, headers, runtime) @@ -299,6 +490,14 @@ def create_algorithm_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateAlgorithmVersionResponse: + """ + @summary 创建一个新的算法版本 + + @param tmp_req: CreateAlgorithmVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateAlgorithmVersionResponse + """ UtilClient.validate_model(tmp_req) request = pai_studio_20220112_models.CreateAlgorithmVersionShrinkRequest() OpenApiUtilClient.convert(tmp_req, request) @@ -335,6 +534,14 @@ async def create_algorithm_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateAlgorithmVersionResponse: + """ + @summary 创建一个新的算法版本 + + @param tmp_req: CreateAlgorithmVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateAlgorithmVersionResponse + """ UtilClient.validate_model(tmp_req) request = pai_studio_20220112_models.CreateAlgorithmVersionShrinkRequest() OpenApiUtilClient.convert(tmp_req, request) @@ -369,6 +576,12 @@ def create_algorithm_version( algorithm_version: str, request: pai_studio_20220112_models.CreateAlgorithmVersionRequest, ) -> pai_studio_20220112_models.CreateAlgorithmVersionResponse: + """ + @summary 创建一个新的算法版本 + + @param request: CreateAlgorithmVersionRequest + @return: CreateAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_algorithm_version_with_options(algorithm_id, algorithm_version, request, headers, runtime) @@ -379,6 +592,12 @@ async def create_algorithm_version_async( algorithm_version: str, request: pai_studio_20220112_models.CreateAlgorithmVersionRequest, ) -> pai_studio_20220112_models.CreateAlgorithmVersionResponse: + """ + @summary 
创建一个新的算法版本 + + @param request: CreateAlgorithmVersionRequest + @return: CreateAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_algorithm_version_with_options_async(algorithm_id, algorithm_version, request, headers, runtime) @@ -389,6 +608,14 @@ def create_component_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateComponentResponse: + """ + @summary 创建组件 + + @param request: CreateComponentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateComponentResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): @@ -427,6 +654,14 @@ async def create_component_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateComponentResponse: + """ + @summary 创建组件 + + @param request: CreateComponentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateComponentResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): @@ -463,6 +698,12 @@ def create_component( self, request: pai_studio_20220112_models.CreateComponentRequest, ) -> pai_studio_20220112_models.CreateComponentResponse: + """ + @summary 创建组件 + + @param request: CreateComponentRequest + @return: CreateComponentResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_component_with_options(request, headers, runtime) @@ -471,6 +712,12 @@ async def create_component_async( self, request: pai_studio_20220112_models.CreateComponentRequest, ) -> pai_studio_20220112_models.CreateComponentResponse: + """ + @summary 创建组件 + + @param request: CreateComponentRequest + @return: CreateComponentResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await 
self.create_component_with_options_async(request, headers, runtime) @@ -482,6 +729,14 @@ def create_component_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateComponentVersionResponse: + """ + @summary 创建组件版本 + + @param request: CreateComponentVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateComponentVersionResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.config_dir): @@ -521,6 +776,14 @@ async def create_component_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateComponentVersionResponse: + """ + @summary 创建组件版本 + + @param request: CreateComponentVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateComponentVersionResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.config_dir): @@ -558,6 +821,12 @@ def create_component_version( component_id: str, request: pai_studio_20220112_models.CreateComponentVersionRequest, ) -> pai_studio_20220112_models.CreateComponentVersionResponse: + """ + @summary 创建组件版本 + + @param request: CreateComponentVersionRequest + @return: CreateComponentVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_component_version_with_options(component_id, request, headers, runtime) @@ -567,16 +836,124 @@ async def create_component_version_async( component_id: str, request: pai_studio_20220112_models.CreateComponentVersionRequest, ) -> pai_studio_20220112_models.CreateComponentVersionResponse: + """ + @summary 创建组件版本 + + @param request: CreateComponentVersionRequest + @return: CreateComponentVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_component_version_with_options_async(component_id, 
request, headers, runtime) + def create_instance_web_terminal_with_options( + self, + training_job_id: str, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.CreateInstanceWebTerminalResponse: + """ + @summary 创建WebTerminal + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateInstanceWebTerminalResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='CreateInstanceWebTerminal', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/webterminals', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.CreateInstanceWebTerminalResponse(), + self.call_api(params, req, runtime) + ) + + async def create_instance_web_terminal_with_options_async( + self, + training_job_id: str, + instance_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.CreateInstanceWebTerminalResponse: + """ + @summary 创建WebTerminal + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateInstanceWebTerminalResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='CreateInstanceWebTerminal', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/instances/{OpenApiUtilClient.get_encode_param(instance_id)}/webterminals', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.CreateInstanceWebTerminalResponse(), + await self.call_api_async(params, 
req, runtime) + ) + + def create_instance_web_terminal( + self, + training_job_id: str, + instance_id: str, + ) -> pai_studio_20220112_models.CreateInstanceWebTerminalResponse: + """ + @summary 创建WebTerminal + + @return: CreateInstanceWebTerminalResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_instance_web_terminal_with_options(training_job_id, instance_id, headers, runtime) + + async def create_instance_web_terminal_async( + self, + training_job_id: str, + instance_id: str, + ) -> pai_studio_20220112_models.CreateInstanceWebTerminalResponse: + """ + @summary 创建WebTerminal + + @return: CreateInstanceWebTerminalResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_instance_web_terminal_with_options_async(training_job_id, instance_id, headers, runtime) + def create_quota_with_options( self, request: pai_studio_20220112_models.CreateQuotaRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateQuotaResponse: + """ + @summary 创建Quota + + @param request: CreateQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateQuotaResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.allocate_strategy): @@ -589,6 +966,10 @@ def create_quota_with_options( body['Min'] = request.min if not UtilClient.is_unset(request.parent_quota_id): body['ParentQuotaId'] = request.parent_quota_id + if not UtilClient.is_unset(request.queue_strategy): + body['QueueStrategy'] = request.queue_strategy + if not UtilClient.is_unset(request.quota_config): + body['QuotaConfig'] = request.quota_config if not UtilClient.is_unset(request.quota_name): body['QuotaName'] = request.quota_name if not UtilClient.is_unset(request.resource_group_ids): @@ -621,6 +1002,14 @@ async def create_quota_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) 
-> pai_studio_20220112_models.CreateQuotaResponse: + """ + @summary 创建Quota + + @param request: CreateQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateQuotaResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.allocate_strategy): @@ -633,6 +1022,10 @@ async def create_quota_with_options_async( body['Min'] = request.min if not UtilClient.is_unset(request.parent_quota_id): body['ParentQuotaId'] = request.parent_quota_id + if not UtilClient.is_unset(request.queue_strategy): + body['QueueStrategy'] = request.queue_strategy + if not UtilClient.is_unset(request.quota_config): + body['QuotaConfig'] = request.quota_config if not UtilClient.is_unset(request.quota_name): body['QuotaName'] = request.quota_name if not UtilClient.is_unset(request.resource_group_ids): @@ -663,6 +1056,12 @@ def create_quota( self, request: pai_studio_20220112_models.CreateQuotaRequest, ) -> pai_studio_20220112_models.CreateQuotaResponse: + """ + @summary 创建Quota + + @param request: CreateQuotaRequest + @return: CreateQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_quota_with_options(request, headers, runtime) @@ -671,6 +1070,12 @@ async def create_quota_async( self, request: pai_studio_20220112_models.CreateQuotaRequest, ) -> pai_studio_20220112_models.CreateQuotaResponse: + """ + @summary 创建Quota + + @param request: CreateQuotaRequest + @return: CreateQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_quota_with_options_async(request, headers, runtime) @@ -681,6 +1086,14 @@ def create_resource_group_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateResourceGroupResponse: + """ + @summary 创建资源组 + + @param request: CreateResourceGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: 
CreateResourceGroupResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.computing_resource_provider): @@ -691,6 +1104,8 @@ def create_resource_group_with_options( body['Name'] = request.name if not UtilClient.is_unset(request.resource_type): body['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.tag): + body['Tag'] = request.tag if not UtilClient.is_unset(request.user_vpc): body['UserVpc'] = request.user_vpc req = open_api_models.OpenApiRequest( @@ -719,6 +1134,14 @@ async def create_resource_group_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateResourceGroupResponse: + """ + @summary 创建资源组 + + @param request: CreateResourceGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateResourceGroupResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.computing_resource_provider): @@ -729,6 +1152,8 @@ async def create_resource_group_with_options_async( body['Name'] = request.name if not UtilClient.is_unset(request.resource_type): body['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.tag): + body['Tag'] = request.tag if not UtilClient.is_unset(request.user_vpc): body['UserVpc'] = request.user_vpc req = open_api_models.OpenApiRequest( @@ -755,6 +1180,12 @@ def create_resource_group( self, request: pai_studio_20220112_models.CreateResourceGroupRequest, ) -> pai_studio_20220112_models.CreateResourceGroupResponse: + """ + @summary 创建资源组 + + @param request: CreateResourceGroupRequest + @return: CreateResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_resource_group_with_options(request, headers, runtime) @@ -763,16 +1194,162 @@ async def create_resource_group_async( self, request: pai_studio_20220112_models.CreateResourceGroupRequest, ) -> 
pai_studio_20220112_models.CreateResourceGroupResponse: + """ + @summary 创建资源组 + + @param request: CreateResourceGroupRequest + @return: CreateResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_resource_group_with_options_async(request, headers, runtime) + def create_resource_group_machine_group_with_options( + self, + resource_group_id: str, + request: pai_studio_20220112_models.CreateResourceGroupMachineGroupRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.CreateResourceGroupMachineGroupResponse: + """ + @summary 创建机器组 + + @param request: CreateResourceGroupMachineGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateResourceGroupMachineGroupResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.ecs_count): + body['EcsCount'] = request.ecs_count + if not UtilClient.is_unset(request.ecs_spec): + body['EcsSpec'] = request.ecs_spec + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + if not UtilClient.is_unset(request.payment_duration): + body['PaymentDuration'] = request.payment_duration + if not UtilClient.is_unset(request.payment_duration_unit): + body['PaymentDurationUnit'] = request.payment_duration_unit + if not UtilClient.is_unset(request.payment_type): + body['PaymentType'] = request.payment_type + if not UtilClient.is_unset(request.tag): + body['Tag'] = request.tag + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateResourceGroupMachineGroup', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/machinegroups', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + 
pai_studio_20220112_models.CreateResourceGroupMachineGroupResponse(), + self.call_api(params, req, runtime) + ) + + async def create_resource_group_machine_group_with_options_async( + self, + resource_group_id: str, + request: pai_studio_20220112_models.CreateResourceGroupMachineGroupRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.CreateResourceGroupMachineGroupResponse: + """ + @summary 创建机器组 + + @param request: CreateResourceGroupMachineGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateResourceGroupMachineGroupResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.ecs_count): + body['EcsCount'] = request.ecs_count + if not UtilClient.is_unset(request.ecs_spec): + body['EcsSpec'] = request.ecs_spec + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + if not UtilClient.is_unset(request.payment_duration): + body['PaymentDuration'] = request.payment_duration + if not UtilClient.is_unset(request.payment_duration_unit): + body['PaymentDurationUnit'] = request.payment_duration_unit + if not UtilClient.is_unset(request.payment_type): + body['PaymentType'] = request.payment_type + if not UtilClient.is_unset(request.tag): + body['Tag'] = request.tag + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='CreateResourceGroupMachineGroup', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/machinegroups', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.CreateResourceGroupMachineGroupResponse(), + await self.call_api_async(params, req, runtime) + ) + + def create_resource_group_machine_group( + self, + 
resource_group_id: str, + request: pai_studio_20220112_models.CreateResourceGroupMachineGroupRequest, + ) -> pai_studio_20220112_models.CreateResourceGroupMachineGroupResponse: + """ + @summary 创建机器组 + + @param request: CreateResourceGroupMachineGroupRequest + @return: CreateResourceGroupMachineGroupResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.create_resource_group_machine_group_with_options(resource_group_id, request, headers, runtime) + + async def create_resource_group_machine_group_async( + self, + resource_group_id: str, + request: pai_studio_20220112_models.CreateResourceGroupMachineGroupRequest, + ) -> pai_studio_20220112_models.CreateResourceGroupMachineGroupResponse: + """ + @summary 创建机器组 + + @param request: CreateResourceGroupMachineGroupRequest + @return: CreateResourceGroupMachineGroupResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.create_resource_group_machine_group_with_options_async(resource_group_id, request, headers, runtime) + def create_service_identity_role_with_options( self, request: pai_studio_20220112_models.CreateServiceIdentityRoleRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateServiceIdentityRoleResponse: + """ + @summary 创建服务认证角色 + + @param request: CreateServiceIdentityRoleRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateServiceIdentityRoleResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.role_name): @@ -803,6 +1380,14 @@ async def create_service_identity_role_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateServiceIdentityRoleResponse: + """ + @summary 创建服务认证角色 + + @param request: CreateServiceIdentityRoleRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: 
CreateServiceIdentityRoleResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.role_name): @@ -831,6 +1416,12 @@ def create_service_identity_role( self, request: pai_studio_20220112_models.CreateServiceIdentityRoleRequest, ) -> pai_studio_20220112_models.CreateServiceIdentityRoleResponse: + """ + @summary 创建服务认证角色 + + @param request: CreateServiceIdentityRoleRequest + @return: CreateServiceIdentityRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_service_identity_role_with_options(request, headers, runtime) @@ -839,6 +1430,12 @@ async def create_service_identity_role_async( self, request: pai_studio_20220112_models.CreateServiceIdentityRoleRequest, ) -> pai_studio_20220112_models.CreateServiceIdentityRoleResponse: + """ + @summary 创建服务认证角色 + + @param request: CreateServiceIdentityRoleRequest + @return: CreateServiceIdentityRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_service_identity_role_with_options_async(request, headers, runtime) @@ -849,6 +1446,14 @@ def create_training_job_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateTrainingJobResponse: + """ + @summary 创建TrainingJob + + @param request: CreateTrainingJobRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateTrainingJobResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.algorithm_name): @@ -863,6 +1468,10 @@ def create_training_job_with_options( body['CodeDir'] = request.code_dir if not UtilClient.is_unset(request.compute_resource): body['ComputeResource'] = request.compute_resource + if not UtilClient.is_unset(request.environments): + body['Environments'] = request.environments + if not UtilClient.is_unset(request.experiment_config): + body['ExperimentConfig'] = request.experiment_config if not 
UtilClient.is_unset(request.hyper_parameters): body['HyperParameters'] = request.hyper_parameters if not UtilClient.is_unset(request.input_channels): @@ -871,10 +1480,14 @@ def create_training_job_with_options( body['Labels'] = request.labels if not UtilClient.is_unset(request.output_channels): body['OutputChannels'] = request.output_channels + if not UtilClient.is_unset(request.python_requirements): + body['PythonRequirements'] = request.python_requirements if not UtilClient.is_unset(request.role_arn): body['RoleArn'] = request.role_arn if not UtilClient.is_unset(request.scheduler): body['Scheduler'] = request.scheduler + if not UtilClient.is_unset(request.settings): + body['Settings'] = request.settings if not UtilClient.is_unset(request.training_job_description): body['TrainingJobDescription'] = request.training_job_description if not UtilClient.is_unset(request.training_job_name): @@ -909,6 +1522,14 @@ async def create_training_job_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.CreateTrainingJobResponse: + """ + @summary 创建TrainingJob + + @param request: CreateTrainingJobRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: CreateTrainingJobResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.algorithm_name): @@ -923,6 +1544,10 @@ async def create_training_job_with_options_async( body['CodeDir'] = request.code_dir if not UtilClient.is_unset(request.compute_resource): body['ComputeResource'] = request.compute_resource + if not UtilClient.is_unset(request.environments): + body['Environments'] = request.environments + if not UtilClient.is_unset(request.experiment_config): + body['ExperimentConfig'] = request.experiment_config if not UtilClient.is_unset(request.hyper_parameters): body['HyperParameters'] = request.hyper_parameters if not UtilClient.is_unset(request.input_channels): @@ -931,10 +1556,14 @@ 
async def create_training_job_with_options_async( body['Labels'] = request.labels if not UtilClient.is_unset(request.output_channels): body['OutputChannels'] = request.output_channels + if not UtilClient.is_unset(request.python_requirements): + body['PythonRequirements'] = request.python_requirements if not UtilClient.is_unset(request.role_arn): body['RoleArn'] = request.role_arn if not UtilClient.is_unset(request.scheduler): body['Scheduler'] = request.scheduler + if not UtilClient.is_unset(request.settings): + body['Settings'] = request.settings if not UtilClient.is_unset(request.training_job_description): body['TrainingJobDescription'] = request.training_job_description if not UtilClient.is_unset(request.training_job_name): @@ -967,6 +1596,12 @@ def create_training_job( self, request: pai_studio_20220112_models.CreateTrainingJobRequest, ) -> pai_studio_20220112_models.CreateTrainingJobResponse: + """ + @summary 创建TrainingJob + + @param request: CreateTrainingJobRequest + @return: CreateTrainingJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.create_training_job_with_options(request, headers, runtime) @@ -975,6 +1610,12 @@ async def create_training_job_async( self, request: pai_studio_20220112_models.CreateTrainingJobRequest, ) -> pai_studio_20220112_models.CreateTrainingJobResponse: + """ + @summary 创建TrainingJob + + @param request: CreateTrainingJobRequest + @return: CreateTrainingJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.create_training_job_with_options_async(request, headers, runtime) @@ -985,6 +1626,13 @@ def delete_algorithm_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteAlgorithmResponse: + """ + @summary 删除算法 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteAlgorithmResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1010,6 +1658,13 
@@ async def delete_algorithm_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteAlgorithmResponse: + """ + @summary 删除算法 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteAlgorithmResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1033,6 +1688,11 @@ def delete_algorithm( self, algorithm_id: str, ) -> pai_studio_20220112_models.DeleteAlgorithmResponse: + """ + @summary 删除算法 + + @return: DeleteAlgorithmResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_algorithm_with_options(algorithm_id, headers, runtime) @@ -1041,6 +1701,11 @@ async def delete_algorithm_async( self, algorithm_id: str, ) -> pai_studio_20220112_models.DeleteAlgorithmResponse: + """ + @summary 删除算法 + + @return: DeleteAlgorithmResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_algorithm_with_options_async(algorithm_id, headers, runtime) @@ -1052,6 +1717,13 @@ def delete_algorithm_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteAlgorithmVersionResponse: + """ + @summary 删除算法版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteAlgorithmVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1078,6 +1750,13 @@ async def delete_algorithm_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteAlgorithmVersionResponse: + """ + @summary 删除算法版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteAlgorithmVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1102,6 +1781,11 @@ def delete_algorithm_version( algorithm_id: str, algorithm_version: str, ) -> 
pai_studio_20220112_models.DeleteAlgorithmVersionResponse: + """ + @summary 删除算法版本 + + @return: DeleteAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_algorithm_version_with_options(algorithm_id, algorithm_version, headers, runtime) @@ -1111,6 +1795,11 @@ async def delete_algorithm_version_async( algorithm_id: str, algorithm_version: str, ) -> pai_studio_20220112_models.DeleteAlgorithmVersionResponse: + """ + @summary 删除算法版本 + + @return: DeleteAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_algorithm_version_with_options_async(algorithm_id, algorithm_version, headers, runtime) @@ -1121,6 +1810,13 @@ def delete_component_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteComponentResponse: + """ + @summary 删除组件 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteComponentResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1146,6 +1842,13 @@ async def delete_component_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteComponentResponse: + """ + @summary 删除组件 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteComponentResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1169,6 +1872,11 @@ def delete_component( self, component_id: str, ) -> pai_studio_20220112_models.DeleteComponentResponse: + """ + @summary 删除组件 + + @return: DeleteComponentResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_component_with_options(component_id, headers, runtime) @@ -1177,6 +1885,11 @@ async def delete_component_async( self, component_id: str, ) -> pai_studio_20220112_models.DeleteComponentResponse: + """ + @summary 删除组件 + + @return: DeleteComponentResponse + 
""" runtime = util_models.RuntimeOptions() headers = {} return await self.delete_component_with_options_async(component_id, headers, runtime) @@ -1188,6 +1901,13 @@ def delete_component_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteComponentVersionResponse: + """ + @summary 删除组件版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteComponentVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1214,6 +1934,13 @@ async def delete_component_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteComponentVersionResponse: + """ + @summary 删除组件版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteComponentVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1238,6 +1965,11 @@ def delete_component_version( component_id: str, version: str, ) -> pai_studio_20220112_models.DeleteComponentVersionResponse: + """ + @summary 删除组件版本 + + @return: DeleteComponentVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_component_version_with_options(component_id, version, headers, runtime) @@ -1247,6 +1979,11 @@ async def delete_component_version_async( component_id: str, version: str, ) -> pai_studio_20220112_models.DeleteComponentVersionResponse: + """ + @summary 删除组件版本 + + @return: DeleteComponentVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_component_version_with_options_async(component_id, version, headers, runtime) @@ -1257,6 +1994,13 @@ def delete_component_version_snapshot_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteComponentVersionSnapshotResponse: + """ + @summary 删除组件版本快照 + + @param headers: map + @param 
runtime: runtime options for this request RuntimeOptions + @return: DeleteComponentVersionSnapshotResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1282,6 +2026,13 @@ async def delete_component_version_snapshot_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteComponentVersionSnapshotResponse: + """ + @summary 删除组件版本快照 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteComponentVersionSnapshotResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1305,6 +2056,11 @@ def delete_component_version_snapshot( self, snapshot_id: str, ) -> pai_studio_20220112_models.DeleteComponentVersionSnapshotResponse: + """ + @summary 删除组件版本快照 + + @return: DeleteComponentVersionSnapshotResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_component_version_snapshot_with_options(snapshot_id, headers, runtime) @@ -1313,6 +2069,11 @@ async def delete_component_version_snapshot_async( self, snapshot_id: str, ) -> pai_studio_20220112_models.DeleteComponentVersionSnapshotResponse: + """ + @summary 删除组件版本快照 + + @return: DeleteComponentVersionSnapshotResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_component_version_snapshot_with_options_async(snapshot_id, headers, runtime) @@ -1323,6 +2084,13 @@ def delete_machine_group_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteMachineGroupResponse: + """ + @summary delete machine group + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteMachineGroupResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1348,6 +2116,13 @@ async def delete_machine_group_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> 
pai_studio_20220112_models.DeleteMachineGroupResponse: + """ + @summary delete machine group + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteMachineGroupResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1371,6 +2146,11 @@ def delete_machine_group( self, machine_group_id: str, ) -> pai_studio_20220112_models.DeleteMachineGroupResponse: + """ + @summary delete machine group + + @return: DeleteMachineGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_machine_group_with_options(machine_group_id, headers, runtime) @@ -1379,6 +2159,11 @@ async def delete_machine_group_async( self, machine_group_id: str, ) -> pai_studio_20220112_models.DeleteMachineGroupResponse: + """ + @summary delete machine group + + @return: DeleteMachineGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_machine_group_with_options_async(machine_group_id, headers, runtime) @@ -1389,6 +2174,13 @@ def delete_quota_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteQuotaResponse: + """ + @summary 删除Quota + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteQuotaResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1414,6 +2206,13 @@ async def delete_quota_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteQuotaResponse: + """ + @summary 删除Quota + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteQuotaResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1437,6 +2236,11 @@ def delete_quota( self, quota_id: str, ) -> pai_studio_20220112_models.DeleteQuotaResponse: + """ + @summary 删除Quota + + @return: DeleteQuotaResponse + """ runtime = 
util_models.RuntimeOptions() headers = {} return self.delete_quota_with_options(quota_id, headers, runtime) @@ -1445,6 +2249,11 @@ async def delete_quota_async( self, quota_id: str, ) -> pai_studio_20220112_models.DeleteQuotaResponse: + """ + @summary 删除Quota + + @return: DeleteQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_quota_with_options_async(quota_id, headers, runtime) @@ -1456,6 +2265,14 @@ def delete_quota_labels_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteQuotaLabelsResponse: + """ + @summary 删除Quota标签 + + @param request: DeleteQuotaLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteQuotaLabelsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.keys): @@ -1487,6 +2304,14 @@ async def delete_quota_labels_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteQuotaLabelsResponse: + """ + @summary 删除Quota标签 + + @param request: DeleteQuotaLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteQuotaLabelsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.keys): @@ -1516,6 +2341,12 @@ def delete_quota_labels( quota_id: str, request: pai_studio_20220112_models.DeleteQuotaLabelsRequest, ) -> pai_studio_20220112_models.DeleteQuotaLabelsResponse: + """ + @summary 删除Quota标签 + + @param request: DeleteQuotaLabelsRequest + @return: DeleteQuotaLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_quota_labels_with_options(quota_id, request, headers, runtime) @@ -1525,6 +2356,12 @@ async def delete_quota_labels_async( quota_id: str, request: pai_studio_20220112_models.DeleteQuotaLabelsRequest, ) -> 
pai_studio_20220112_models.DeleteQuotaLabelsResponse: + """ + @summary 删除Quota标签 + + @param request: DeleteQuotaLabelsRequest + @return: DeleteQuotaLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_quota_labels_with_options_async(quota_id, request, headers, runtime) @@ -1535,6 +2372,13 @@ def delete_resource_group_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteResourceGroupResponse: + """ + @summary 删除资源组 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteResourceGroupResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1560,6 +2404,13 @@ async def delete_resource_group_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteResourceGroupResponse: + """ + @summary 删除资源组 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteResourceGroupResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1583,6 +2434,11 @@ def delete_resource_group( self, resource_group_id: str, ) -> pai_studio_20220112_models.DeleteResourceGroupResponse: + """ + @summary 删除资源组 + + @return: DeleteResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_resource_group_with_options(resource_group_id, headers, runtime) @@ -1591,6 +2447,11 @@ async def delete_resource_group_async( self, resource_group_id: str, ) -> pai_studio_20220112_models.DeleteResourceGroupResponse: + """ + @summary 删除资源组 + + @return: DeleteResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_resource_group_with_options_async(resource_group_id, headers, runtime) @@ -1602,6 +2463,13 @@ def delete_resource_group_machine_group_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> 
pai_studio_20220112_models.DeleteResourceGroupMachineGroupResponse: + """ + @summary delete machine group + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteResourceGroupMachineGroupResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1628,6 +2496,13 @@ async def delete_resource_group_machine_group_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteResourceGroupMachineGroupResponse: + """ + @summary delete machine group + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteResourceGroupMachineGroupResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1652,6 +2527,11 @@ def delete_resource_group_machine_group( machine_group_id: str, resource_group_id: str, ) -> pai_studio_20220112_models.DeleteResourceGroupMachineGroupResponse: + """ + @summary delete machine group + + @return: DeleteResourceGroupMachineGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_resource_group_machine_group_with_options(machine_group_id, resource_group_id, headers, runtime) @@ -1661,6 +2541,11 @@ async def delete_resource_group_machine_group_async( machine_group_id: str, resource_group_id: str, ) -> pai_studio_20220112_models.DeleteResourceGroupMachineGroupResponse: + """ + @summary delete machine group + + @return: DeleteResourceGroupMachineGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_resource_group_machine_group_with_options_async(machine_group_id, resource_group_id, headers, runtime) @@ -1671,6 +2556,13 @@ def delete_training_job_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteTrainingJobResponse: + """ + @summary 删除一个TrainingJob + + @param headers: map + @param runtime: runtime options for this request 
RuntimeOptions + @return: DeleteTrainingJobResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1696,6 +2588,13 @@ async def delete_training_job_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteTrainingJobResponse: + """ + @summary 删除一个TrainingJob + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteTrainingJobResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1719,6 +2618,11 @@ def delete_training_job( self, training_job_id: str, ) -> pai_studio_20220112_models.DeleteTrainingJobResponse: + """ + @summary 删除一个TrainingJob + + @return: DeleteTrainingJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_training_job_with_options(training_job_id, headers, runtime) @@ -1727,6 +2631,11 @@ async def delete_training_job_async( self, training_job_id: str, ) -> pai_studio_20220112_models.DeleteTrainingJobResponse: + """ + @summary 删除一个TrainingJob + + @return: DeleteTrainingJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_training_job_with_options_async(training_job_id, headers, runtime) @@ -1738,6 +2647,14 @@ def delete_training_job_labels_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteTrainingJobLabelsResponse: + """ + @summary 删除TrainingJob的Labels + + @param request: DeleteTrainingJobLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteTrainingJobLabelsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.keys): @@ -1769,6 +2686,14 @@ async def delete_training_job_labels_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.DeleteTrainingJobLabelsResponse: + """ + @summary 删除TrainingJob的Labels + 
+ @param request: DeleteTrainingJobLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: DeleteTrainingJobLabelsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.keys): @@ -1798,6 +2723,12 @@ def delete_training_job_labels( training_job_id: str, request: pai_studio_20220112_models.DeleteTrainingJobLabelsRequest, ) -> pai_studio_20220112_models.DeleteTrainingJobLabelsResponse: + """ + @summary 删除TrainingJob的Labels + + @param request: DeleteTrainingJobLabelsRequest + @return: DeleteTrainingJobLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.delete_training_job_labels_with_options(training_job_id, request, headers, runtime) @@ -1807,6 +2738,12 @@ async def delete_training_job_labels_async( training_job_id: str, request: pai_studio_20220112_models.DeleteTrainingJobLabelsRequest, ) -> pai_studio_20220112_models.DeleteTrainingJobLabelsResponse: + """ + @summary 删除TrainingJob的Labels + + @param request: DeleteTrainingJobLabelsRequest + @return: DeleteTrainingJobLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.delete_training_job_labels_with_options_async(training_job_id, request, headers, runtime) @@ -1816,6 +2753,13 @@ def get_ai4ddefault_bucket_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetAI4DDefaultBucketResponse: + """ + @summary 获取AI4D模型桶 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetAI4DDefaultBucketResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1840,6 +2784,13 @@ async def get_ai4ddefault_bucket_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetAI4DDefaultBucketResponse: + """ + @summary 获取AI4D模型桶 + + @param headers: map + @param runtime: runtime options for this request 
RuntimeOptions + @return: GetAI4DDefaultBucketResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1860,11 +2811,21 @@ async def get_ai4ddefault_bucket_with_options_async( ) def get_ai4ddefault_bucket(self) -> pai_studio_20220112_models.GetAI4DDefaultBucketResponse: + """ + @summary 获取AI4D模型桶 + + @return: GetAI4DDefaultBucketResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_ai4ddefault_bucket_with_options(headers, runtime) async def get_ai4ddefault_bucket_async(self) -> pai_studio_20220112_models.GetAI4DDefaultBucketResponse: + """ + @summary 获取AI4D模型桶 + + @return: GetAI4DDefaultBucketResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_ai4ddefault_bucket_with_options_async(headers, runtime) @@ -1875,6 +2836,13 @@ def get_algorithm_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetAlgorithmResponse: + """ + @summary 获取一个算法信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetAlgorithmResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1900,6 +2868,13 @@ async def get_algorithm_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetAlgorithmResponse: + """ + @summary 获取一个算法信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetAlgorithmResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1923,6 +2898,11 @@ def get_algorithm( self, algorithm_id: str, ) -> pai_studio_20220112_models.GetAlgorithmResponse: + """ + @summary 获取一个算法信息 + + @return: GetAlgorithmResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_algorithm_with_options(algorithm_id, headers, runtime) @@ -1931,6 +2911,11 @@ async def get_algorithm_async( self, algorithm_id: str, ) -> 
pai_studio_20220112_models.GetAlgorithmResponse: + """ + @summary 获取一个算法信息 + + @return: GetAlgorithmResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_algorithm_with_options_async(algorithm_id, headers, runtime) @@ -1942,6 +2927,13 @@ def get_algorithm_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetAlgorithmVersionResponse: + """ + @summary 创建一个新的算法版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetAlgorithmVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1968,6 +2960,13 @@ async def get_algorithm_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetAlgorithmVersionResponse: + """ + @summary 创建一个新的算法版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetAlgorithmVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -1992,6 +2991,11 @@ def get_algorithm_version( algorithm_id: str, algorithm_version: str, ) -> pai_studio_20220112_models.GetAlgorithmVersionResponse: + """ + @summary 创建一个新的算法版本 + + @return: GetAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_algorithm_version_with_options(algorithm_id, algorithm_version, headers, runtime) @@ -2001,6 +3005,11 @@ async def get_algorithm_version_async( algorithm_id: str, algorithm_version: str, ) -> pai_studio_20220112_models.GetAlgorithmVersionResponse: + """ + @summary 创建一个新的算法版本 + + @return: GetAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_algorithm_version_with_options_async(algorithm_id, algorithm_version, headers, runtime) @@ -2011,6 +3020,13 @@ def get_component_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> 
pai_studio_20220112_models.GetComponentResponse: + """ + @summary 查询组件信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetComponentResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2036,6 +3052,13 @@ async def get_component_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetComponentResponse: + """ + @summary 查询组件信息 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetComponentResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2059,6 +3082,11 @@ def get_component( self, component_id: str, ) -> pai_studio_20220112_models.GetComponentResponse: + """ + @summary 查询组件信息 + + @return: GetComponentResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_component_with_options(component_id, headers, runtime) @@ -2067,6 +3095,11 @@ async def get_component_async( self, component_id: str, ) -> pai_studio_20220112_models.GetComponentResponse: + """ + @summary 查询组件信息 + + @return: GetComponentResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_component_with_options_async(component_id, headers, runtime) @@ -2078,6 +3111,13 @@ def get_component_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetComponentVersionResponse: + """ + @summary 获取组件版本 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetComponentVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2104,6 +3144,13 @@ async def get_component_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetComponentVersionResponse: + """ + @summary 获取组件版本 + + @param headers: map + @param runtime: runtime options for this request 
RuntimeOptions + @return: GetComponentVersionResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2128,6 +3175,11 @@ def get_component_version( component_id: str, version: str, ) -> pai_studio_20220112_models.GetComponentVersionResponse: + """ + @summary 获取组件版本 + + @return: GetComponentVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_component_version_with_options(component_id, version, headers, runtime) @@ -2137,6 +3189,11 @@ async def get_component_version_async( component_id: str, version: str, ) -> pai_studio_20220112_models.GetComponentVersionResponse: + """ + @summary 获取组件版本 + + @return: GetComponentVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_component_version_with_options_async(component_id, version, headers, runtime) @@ -2147,6 +3204,13 @@ def get_component_version_snapshot_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetComponentVersionSnapshotResponse: + """ + @summary 获取组件版本快照 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetComponentVersionSnapshotResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2172,6 +3236,13 @@ async def get_component_version_snapshot_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetComponentVersionSnapshotResponse: + """ + @summary 获取组件版本快照 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetComponentVersionSnapshotResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2195,6 +3266,11 @@ def get_component_version_snapshot( self, snapshot_id: str, ) -> pai_studio_20220112_models.GetComponentVersionSnapshotResponse: + """ + @summary 获取组件版本快照 + + @return: GetComponentVersionSnapshotResponse + """ runtime = util_models.RuntimeOptions() headers = 
{} return self.get_component_version_snapshot_with_options(snapshot_id, headers, runtime) @@ -2203,6 +3279,11 @@ async def get_component_version_snapshot_async( self, snapshot_id: str, ) -> pai_studio_20220112_models.GetComponentVersionSnapshotResponse: + """ + @summary 获取组件版本快照 + + @return: GetComponentVersionSnapshotResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_component_version_snapshot_with_options_async(snapshot_id, headers, runtime) @@ -2213,6 +3294,13 @@ def get_instance_job_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetInstanceJobResponse: + """ + @summary 获取实例任务 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetInstanceJobResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2238,6 +3326,13 @@ async def get_instance_job_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetInstanceJobResponse: + """ + @summary 获取实例任务 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetInstanceJobResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2261,6 +3356,11 @@ def get_instance_job( self, instance_job_id: str, ) -> pai_studio_20220112_models.GetInstanceJobResponse: + """ + @summary 获取实例任务 + + @return: GetInstanceJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_instance_job_with_options(instance_job_id, headers, runtime) @@ -2269,6 +3369,11 @@ async def get_instance_job_async( self, instance_job_id: str, ) -> pai_studio_20220112_models.GetInstanceJobResponse: + """ + @summary 获取实例任务 + + @return: GetInstanceJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_instance_job_with_options_async(instance_job_id, headers, runtime) @@ -2280,6 +3385,14 @@ def 
get_job_view_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetJobViewMetricsResponse: + """ + @summary 按照job来统计性能指标 + + @param request: GetJobViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetJobViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -2323,6 +3436,14 @@ async def get_job_view_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetJobViewMetricsResponse: + """ + @summary 按照job来统计性能指标 + + @param request: GetJobViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetJobViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -2364,6 +3485,12 @@ def get_job_view_metrics( resource_group_id: str, request: pai_studio_20220112_models.GetJobViewMetricsRequest, ) -> pai_studio_20220112_models.GetJobViewMetricsResponse: + """ + @summary 按照job来统计性能指标 + + @param request: GetJobViewMetricsRequest + @return: GetJobViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_job_view_metrics_with_options(resource_group_id, request, headers, runtime) @@ -2373,6 +3500,12 @@ async def get_job_view_metrics_async( resource_group_id: str, request: pai_studio_20220112_models.GetJobViewMetricsRequest, ) -> pai_studio_20220112_models.GetJobViewMetricsResponse: + """ + @summary 按照job来统计性能指标 + + @param request: GetJobViewMetricsRequest + @return: GetJobViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_job_view_metrics_with_options_async(resource_group_id, request, headers, runtime) @@ -2384,6 +3517,14 @@ def get_jobs_statistics_by_quota_with_options( headers: Dict[str, str], runtime: 
util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetJobsStatisticsByQuotaResponse: + """ + @summary 获取当前资源配额的作业统计信息 + + @param request: GetJobsStatisticsByQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetJobsStatisticsByQuotaResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -2419,6 +3560,14 @@ async def get_jobs_statistics_by_quota_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetJobsStatisticsByQuotaResponse: + """ + @summary 获取当前资源配额的作业统计信息 + + @param request: GetJobsStatisticsByQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetJobsStatisticsByQuotaResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -2452,6 +3601,12 @@ def get_jobs_statistics_by_quota( quota_id: str, request: pai_studio_20220112_models.GetJobsStatisticsByQuotaRequest, ) -> pai_studio_20220112_models.GetJobsStatisticsByQuotaResponse: + """ + @summary 获取当前资源配额的作业统计信息 + + @param request: GetJobsStatisticsByQuotaRequest + @return: GetJobsStatisticsByQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_jobs_statistics_by_quota_with_options(quota_id, request, headers, runtime) @@ -2461,6 +3616,12 @@ async def get_jobs_statistics_by_quota_async( quota_id: str, request: pai_studio_20220112_models.GetJobsStatisticsByQuotaRequest, ) -> pai_studio_20220112_models.GetJobsStatisticsByQuotaResponse: + """ + @summary 获取当前资源配额的作业统计信息 + + @param request: GetJobsStatisticsByQuotaRequest + @return: GetJobsStatisticsByQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_jobs_statistics_by_quota_with_options_async(quota_id, request, headers, runtime) @@ -2472,6 +3633,14 @@ def 
get_jobs_statistics_by_resource_group_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetJobsStatisticsByResourceGroupResponse: + """ + @summary 按照resource group,查询Job的状态统计信息 + + @param request: GetJobsStatisticsByResourceGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetJobsStatisticsByResourceGroupResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -2507,6 +3676,14 @@ async def get_jobs_statistics_by_resource_group_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetJobsStatisticsByResourceGroupResponse: + """ + @summary 按照resource group,查询Job的状态统计信息 + + @param request: GetJobsStatisticsByResourceGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetJobsStatisticsByResourceGroupResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -2540,6 +3717,12 @@ def get_jobs_statistics_by_resource_group( resource_group_id: str, request: pai_studio_20220112_models.GetJobsStatisticsByResourceGroupRequest, ) -> pai_studio_20220112_models.GetJobsStatisticsByResourceGroupResponse: + """ + @summary 按照resource group,查询Job的状态统计信息 + + @param request: GetJobsStatisticsByResourceGroupRequest + @return: GetJobsStatisticsByResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_jobs_statistics_by_resource_group_with_options(resource_group_id, request, headers, runtime) @@ -2549,6 +3732,12 @@ async def get_jobs_statistics_by_resource_group_async( resource_group_id: str, request: pai_studio_20220112_models.GetJobsStatisticsByResourceGroupRequest, ) -> pai_studio_20220112_models.GetJobsStatisticsByResourceGroupResponse: + """ + @summary 按照resource group,查询Job的状态统计信息 + + @param request: 
GetJobsStatisticsByResourceGroupRequest + @return: GetJobsStatisticsByResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_jobs_statistics_by_resource_group_with_options_async(resource_group_id, request, headers, runtime) @@ -2559,6 +3748,13 @@ def get_machine_group_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetMachineGroupResponse: + """ + @summary get machine group + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetMachineGroupResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2584,6 +3780,13 @@ async def get_machine_group_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetMachineGroupResponse: + """ + @summary get machine group + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetMachineGroupResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2607,6 +3810,11 @@ def get_machine_group( self, machine_group_id: str, ) -> pai_studio_20220112_models.GetMachineGroupResponse: + """ + @summary get machine group + + @return: GetMachineGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_machine_group_with_options(machine_group_id, headers, runtime) @@ -2615,39 +3823,58 @@ async def get_machine_group_async( self, machine_group_id: str, ) -> pai_studio_20220112_models.GetMachineGroupResponse: + """ + @summary get machine group + + @return: GetMachineGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_machine_group_with_options_async(machine_group_id, headers, runtime) - def get_node_metrics_with_options( + def get_metrics_with_options( self, - resource_group_id: str, - metric_type: str, - request: pai_studio_20220112_models.GetNodeMetricsRequest, + request: 
pai_studio_20220112_models.GetMetricsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.GetNodeMetricsResponse: + ) -> pai_studio_20220112_models.GetMetricsResponse: + """ + @summary 云监控 DescribeMetricList 代理 API + + @param request: GetMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetMetricsResponse + """ UtilClient.validate_model(request) query = {} + if not UtilClient.is_unset(request.dimensions): + query['Dimensions'] = request.dimensions if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time - if not UtilClient.is_unset(request.gputype): - query['GPUType'] = request.gputype + if not UtilClient.is_unset(request.express): + query['Express'] = request.express + if not UtilClient.is_unset(request.length): + query['Length'] = request.length + if not UtilClient.is_unset(request.metric_name): + query['MetricName'] = request.metric_name + if not UtilClient.is_unset(request.namespace): + query['Namespace'] = request.namespace + if not UtilClient.is_unset(request.next_token): + query['NextToken'] = request.next_token + if not UtilClient.is_unset(request.period): + query['Period'] = request.period if not UtilClient.is_unset(request.start_time): query['StartTime'] = request.start_time - if not UtilClient.is_unset(request.time_step): - query['TimeStep'] = request.time_step - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetNodeMetrics', + action='GetMetrics', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/nodemetrics/{OpenApiUtilClient.get_encode_param(metric_type)}', + pathname=f'/api/v1/quotas/cms/metrics', method='GET', auth_type='AK', style='ROA', @@ -2655,39 +3882,53 @@ def 
get_node_metrics_with_options( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.GetNodeMetricsResponse(), + pai_studio_20220112_models.GetMetricsResponse(), self.call_api(params, req, runtime) ) - async def get_node_metrics_with_options_async( + async def get_metrics_with_options_async( self, - resource_group_id: str, - metric_type: str, - request: pai_studio_20220112_models.GetNodeMetricsRequest, + request: pai_studio_20220112_models.GetMetricsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.GetNodeMetricsResponse: + ) -> pai_studio_20220112_models.GetMetricsResponse: + """ + @summary 云监控 DescribeMetricList 代理 API + + @param request: GetMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetMetricsResponse + """ UtilClient.validate_model(request) query = {} + if not UtilClient.is_unset(request.dimensions): + query['Dimensions'] = request.dimensions if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time - if not UtilClient.is_unset(request.gputype): - query['GPUType'] = request.gputype + if not UtilClient.is_unset(request.express): + query['Express'] = request.express + if not UtilClient.is_unset(request.length): + query['Length'] = request.length + if not UtilClient.is_unset(request.metric_name): + query['MetricName'] = request.metric_name + if not UtilClient.is_unset(request.namespace): + query['Namespace'] = request.namespace + if not UtilClient.is_unset(request.next_token): + query['NextToken'] = request.next_token + if not UtilClient.is_unset(request.period): + query['Period'] = request.period if not UtilClient.is_unset(request.start_time): query['StartTime'] = request.start_time - if not UtilClient.is_unset(request.time_step): - query['TimeStep'] = request.time_step - if not UtilClient.is_unset(request.verbose): - query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( 
headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetNodeMetrics', + action='GetMetrics', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/nodemetrics/{OpenApiUtilClient.get_encode_param(metric_type)}', + pathname=f'/api/v1/quotas/cms/metrics', method='GET', auth_type='AK', style='ROA', @@ -2695,58 +3936,72 @@ async def get_node_metrics_with_options_async( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.GetNodeMetricsResponse(), + pai_studio_20220112_models.GetMetricsResponse(), await self.call_api_async(params, req, runtime) ) - def get_node_metrics( + def get_metrics( self, - resource_group_id: str, - metric_type: str, - request: pai_studio_20220112_models.GetNodeMetricsRequest, - ) -> pai_studio_20220112_models.GetNodeMetricsResponse: + request: pai_studio_20220112_models.GetMetricsRequest, + ) -> pai_studio_20220112_models.GetMetricsResponse: + """ + @summary 云监控 DescribeMetricList 代理 API + + @param request: GetMetricsRequest + @return: GetMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_node_metrics_with_options(resource_group_id, metric_type, request, headers, runtime) + return self.get_metrics_with_options(request, headers, runtime) - async def get_node_metrics_async( + async def get_metrics_async( self, - resource_group_id: str, - metric_type: str, - request: pai_studio_20220112_models.GetNodeMetricsRequest, - ) -> pai_studio_20220112_models.GetNodeMetricsResponse: + request: pai_studio_20220112_models.GetMetricsRequest, + ) -> pai_studio_20220112_models.GetMetricsResponse: + """ + @summary 云监控 DescribeMetricList 代理 API + + @param request: GetMetricsRequest + @return: GetMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_node_metrics_with_options_async(resource_group_id, metric_type, request, headers, runtime) + return 
await self.get_metrics_with_options_async(request, headers, runtime) - def get_node_view_metrics_with_options( + def get_node_gpumetrics_with_options( self, - resource_group_id: str, - request: pai_studio_20220112_models.GetNodeViewMetricsRequest, + node_id: str, + request: pai_studio_20220112_models.GetNodeGPUMetricsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.GetNodeViewMetricsResponse: + ) -> pai_studio_20220112_models.GetNodeGPUMetricsResponse: + """ + @summary 查询节点的GPU指标 + + @param request: GetNodeGPUMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetNodeGPUMetricsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.node_id): - query['NodeId'] = request.node_id - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.time_step): - query['TimeStep'] = request.time_step - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.metric_type): + query['MetricType'] = request.metric_type + if not UtilClient.is_unset(request.quota_id): + query['QuotaId'] = request.quota_id + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetNodeViewMetrics', + action='GetNodeGPUMetrics', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/nodeviewmetrics', + pathname=f'/api/v1/nodes/{OpenApiUtilClient.get_encode_param(node_id)}/gpumetrics', 
method='GET', auth_type='AK', style='ROA', @@ -2754,29 +4009,287 @@ def get_node_view_metrics_with_options( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.GetNodeViewMetricsResponse(), + pai_studio_20220112_models.GetNodeGPUMetricsResponse(), self.call_api(params, req, runtime) ) - async def get_node_view_metrics_with_options_async( + async def get_node_gpumetrics_with_options_async( self, - resource_group_id: str, - request: pai_studio_20220112_models.GetNodeViewMetricsRequest, + node_id: str, + request: pai_studio_20220112_models.GetNodeGPUMetricsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.GetNodeViewMetricsResponse: + ) -> pai_studio_20220112_models.GetNodeGPUMetricsResponse: + """ + @summary 查询节点的GPU指标 + + @param request: GetNodeGPUMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetNodeGPUMetricsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.node_id): - query['NodeId'] = request.node_id - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.time_step): - query['TimeStep'] = request.time_step - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.metric_type): + query['MetricType'] = request.metric_type + if not UtilClient.is_unset(request.quota_id): + query['QuotaId'] = request.quota_id + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + 
action='GetNodeGPUMetrics', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/nodes/{OpenApiUtilClient.get_encode_param(node_id)}/gpumetrics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetNodeGPUMetricsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_node_gpumetrics( + self, + node_id: str, + request: pai_studio_20220112_models.GetNodeGPUMetricsRequest, + ) -> pai_studio_20220112_models.GetNodeGPUMetricsResponse: + """ + @summary 查询节点的GPU指标 + + @param request: GetNodeGPUMetricsRequest + @return: GetNodeGPUMetricsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_node_gpumetrics_with_options(node_id, request, headers, runtime) + + async def get_node_gpumetrics_async( + self, + node_id: str, + request: pai_studio_20220112_models.GetNodeGPUMetricsRequest, + ) -> pai_studio_20220112_models.GetNodeGPUMetricsResponse: + """ + @summary 查询节点的GPU指标 + + @param request: GetNodeGPUMetricsRequest + @return: GetNodeGPUMetricsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_node_gpumetrics_with_options_async(node_id, request, headers, runtime) + + def get_node_metrics_with_options( + self, + resource_group_id: str, + metric_type: str, + request: pai_studio_20220112_models.GetNodeMetricsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetNodeMetricsResponse: + """ + @summary get resource group node metrics + + @param request: GetNodeMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetNodeMetricsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.gputype): + query['GPUType'] = request.gputype 
+ if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + if not UtilClient.is_unset(request.time_step): + query['TimeStep'] = request.time_step + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetNodeMetrics', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/nodemetrics/{OpenApiUtilClient.get_encode_param(metric_type)}', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetNodeMetricsResponse(), + self.call_api(params, req, runtime) + ) + + async def get_node_metrics_with_options_async( + self, + resource_group_id: str, + metric_type: str, + request: pai_studio_20220112_models.GetNodeMetricsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetNodeMetricsResponse: + """ + @summary get resource group node metrics + + @param request: GetNodeMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetNodeMetricsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.gputype): + query['GPUType'] = request.gputype + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + if not UtilClient.is_unset(request.time_step): + query['TimeStep'] = request.time_step + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + 
action='GetNodeMetrics', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/nodemetrics/{OpenApiUtilClient.get_encode_param(metric_type)}', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetNodeMetricsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_node_metrics( + self, + resource_group_id: str, + metric_type: str, + request: pai_studio_20220112_models.GetNodeMetricsRequest, + ) -> pai_studio_20220112_models.GetNodeMetricsResponse: + """ + @summary get resource group node metrics + + @param request: GetNodeMetricsRequest + @return: GetNodeMetricsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_node_metrics_with_options(resource_group_id, metric_type, request, headers, runtime) + + async def get_node_metrics_async( + self, + resource_group_id: str, + metric_type: str, + request: pai_studio_20220112_models.GetNodeMetricsRequest, + ) -> pai_studio_20220112_models.GetNodeMetricsResponse: + """ + @summary get resource group node metrics + + @param request: GetNodeMetricsRequest + @return: GetNodeMetricsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_node_metrics_with_options_async(resource_group_id, metric_type, request, headers, runtime) + + def get_node_view_metrics_with_options( + self, + resource_group_id: str, + request: pai_studio_20220112_models.GetNodeViewMetricsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetNodeViewMetricsResponse: + """ + @summary 获取节点视角的metrics + + @param request: GetNodeViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetNodeViewMetricsResponse + """ + UtilClient.validate_model(request) + query = {} + if not 
UtilClient.is_unset(request.node_id): + query['NodeId'] = request.node_id + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.time_step): + query['TimeStep'] = request.time_step + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetNodeViewMetrics', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/nodeviewmetrics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetNodeViewMetricsResponse(), + self.call_api(params, req, runtime) + ) + + async def get_node_view_metrics_with_options_async( + self, + resource_group_id: str, + request: pai_studio_20220112_models.GetNodeViewMetricsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetNodeViewMetricsResponse: + """ + @summary 获取节点视角的metrics + + @param request: GetNodeViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetNodeViewMetricsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.node_id): + query['NodeId'] = request.node_id + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.time_step): + query['TimeStep'] = request.time_step + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id req = 
open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) @@ -2802,6 +4315,12 @@ def get_node_view_metrics( resource_group_id: str, request: pai_studio_20220112_models.GetNodeViewMetricsRequest, ) -> pai_studio_20220112_models.GetNodeViewMetricsResponse: + """ + @summary 获取节点视角的metrics + + @param request: GetNodeViewMetricsRequest + @return: GetNodeViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_node_view_metrics_with_options(resource_group_id, request, headers, runtime) @@ -2811,6 +4330,12 @@ async def get_node_view_metrics_async( resource_group_id: str, request: pai_studio_20220112_models.GetNodeViewMetricsRequest, ) -> pai_studio_20220112_models.GetNodeViewMetricsResponse: + """ + @summary 获取节点视角的metrics + + @param request: GetNodeViewMetricsRequest + @return: GetNodeViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_node_view_metrics_with_options_async(resource_group_id, request, headers, runtime) @@ -2821,6 +4346,13 @@ def get_operation_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetOperationResponse: + """ + @summary 获取资源变更详情 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetOperationResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2846,6 +4378,13 @@ async def get_operation_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetOperationResponse: + """ + @summary 获取资源变更详情 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetOperationResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -2869,6 +4408,11 @@ def get_operation( self, operation_id: str, ) -> pai_studio_20220112_models.GetOperationResponse: + """ + @summary 获取资源变更详情 + + @return: GetOperationResponse + """ 
runtime = util_models.RuntimeOptions() headers = {} return self.get_operation_with_options(operation_id, headers, runtime) @@ -2877,18 +4421,169 @@ async def get_operation_async( self, operation_id: str, ) -> pai_studio_20220112_models.GetOperationResponse: + """ + @summary 获取资源变更详情 + + @return: GetOperationResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_operation_with_options_async(operation_id, headers, runtime) + def get_queue_infos_with_options( + self, + request: pai_studio_20220112_models.GetQueueInfosRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetQueueInfosResponse: + """ + @summary 您可以通过GetQueueInfos得到一组队列的排队信息。 + + @param request: GetQueueInfosRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQueueInfosResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.quota_ids): + query['QuotaIds'] = request.quota_ids + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.workload_ids): + query['WorkloadIds'] = request.workload_ids + if not UtilClient.is_unset(request.workload_type): + query['WorkloadType'] = request.workload_type + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetQueueInfos', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/queueInfos', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', 
+ body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetQueueInfosResponse(), + self.call_api(params, req, runtime) + ) + + async def get_queue_infos_with_options_async( + self, + request: pai_studio_20220112_models.GetQueueInfosRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetQueueInfosResponse: + """ + @summary 您可以通过GetQueueInfos得到一组队列的排队信息。 + + @param request: GetQueueInfosRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQueueInfosResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.quota_ids): + query['QuotaIds'] = request.quota_ids + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.workload_ids): + query['WorkloadIds'] = request.workload_ids + if not UtilClient.is_unset(request.workload_type): + query['WorkloadType'] = request.workload_type + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetQueueInfos', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/queueInfos', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetQueueInfosResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_queue_infos( + self, + request: pai_studio_20220112_models.GetQueueInfosRequest, + ) -> 
pai_studio_20220112_models.GetQueueInfosResponse: + """ + @summary 您可以通过GetQueueInfos得到一组队列的排队信息。 + + @param request: GetQueueInfosRequest + @return: GetQueueInfosResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_queue_infos_with_options(request, headers, runtime) + + async def get_queue_infos_async( + self, + request: pai_studio_20220112_models.GetQueueInfosRequest, + ) -> pai_studio_20220112_models.GetQueueInfosResponse: + """ + @summary 您可以通过GetQueueInfos得到一组队列的排队信息。 + + @param request: GetQueueInfosRequest + @return: GetQueueInfosResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_queue_infos_with_options_async(request, headers, runtime) + def get_quota_with_options( self, quota_id: str, + request: pai_studio_20220112_models.GetQuotaRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaResponse: + """ + @summary 获取Quota + + @param request: GetQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='GetQuota', @@ -2909,11 +4604,25 @@ def get_quota_with_options( async def get_quota_with_options_async( self, quota_id: str, + request: pai_studio_20220112_models.GetQuotaRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaResponse: + """ + @summary 获取Quota + + @param request: GetQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.verbose): + 
query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='GetQuota', @@ -2934,18 +4643,32 @@ async def get_quota_with_options_async( def get_quota( self, quota_id: str, + request: pai_studio_20220112_models.GetQuotaRequest, ) -> pai_studio_20220112_models.GetQuotaResponse: + """ + @summary 获取Quota + + @param request: GetQuotaRequest + @return: GetQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_quota_with_options(quota_id, headers, runtime) + return self.get_quota_with_options(quota_id, request, headers, runtime) async def get_quota_async( self, quota_id: str, + request: pai_studio_20220112_models.GetQuotaRequest, ) -> pai_studio_20220112_models.GetQuotaResponse: + """ + @summary 获取Quota + + @param request: GetQuotaRequest + @return: GetQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_quota_with_options_async(quota_id, headers, runtime) + return await self.get_quota_with_options_async(quota_id, request, headers, runtime) def get_quota_job_view_metrics_with_options( self, @@ -2954,6 +4677,14 @@ def get_quota_job_view_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaJobViewMetricsResponse: + """ + @summary 获取资源配额内运行的DLC、DSW任务的性能指标 + + @param request: GetQuotaJobViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaJobViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -2999,6 +4730,14 @@ async def get_quota_job_view_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaJobViewMetricsResponse: + """ + @summary 获取资源配额内运行的DLC、DSW任务的性能指标 + + @param request: 
GetQuotaJobViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaJobViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3042,6 +4781,12 @@ def get_quota_job_view_metrics( quota_id: str, request: pai_studio_20220112_models.GetQuotaJobViewMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaJobViewMetricsResponse: + """ + @summary 获取资源配额内运行的DLC、DSW任务的性能指标 + + @param request: GetQuotaJobViewMetricsRequest + @return: GetQuotaJobViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_quota_job_view_metrics_with_options(quota_id, request, headers, runtime) @@ -3051,6 +4796,12 @@ async def get_quota_job_view_metrics_async( quota_id: str, request: pai_studio_20220112_models.GetQuotaJobViewMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaJobViewMetricsResponse: + """ + @summary 获取资源配额内运行的DLC、DSW任务的性能指标 + + @param request: GetQuotaJobViewMetricsRequest + @return: GetQuotaJobViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_quota_job_view_metrics_with_options_async(quota_id, request, headers, runtime) @@ -3063,6 +4814,14 @@ def get_quota_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaMetricsResponse: + """ + @summary 资源配额组维度指标 + + @param request: GetQuotaMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3101,6 +4860,14 @@ async def get_quota_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaMetricsResponse: + """ + @summary 资源配额组维度指标 + + @param request: GetQuotaMetricsRequest + @param headers: map + 
@param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3137,6 +4904,12 @@ def get_quota_metrics( metric_type: str, request: pai_studio_20220112_models.GetQuotaMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaMetricsResponse: + """ + @summary 资源配额组维度指标 + + @param request: GetQuotaMetricsRequest + @return: GetQuotaMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_quota_metrics_with_options(quota_id, metric_type, request, headers, runtime) @@ -3147,6 +4920,12 @@ async def get_quota_metrics_async( metric_type: str, request: pai_studio_20220112_models.GetQuotaMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaMetricsResponse: + """ + @summary 资源配额组维度指标 + + @param request: GetQuotaMetricsRequest + @return: GetQuotaMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_quota_metrics_with_options_async(quota_id, metric_type, request, headers, runtime) @@ -3159,6 +4938,14 @@ def get_quota_node_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaNodeMetricsResponse: + """ + @summary 资源配额内节点指标 + + @param request: GetQuotaNodeMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaNodeMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3199,6 +4986,14 @@ async def get_quota_node_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaNodeMetricsResponse: + """ + @summary 资源配额内节点指标 + + @param request: GetQuotaNodeMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaNodeMetricsResponse + """ 
UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3237,6 +5032,12 @@ def get_quota_node_metrics( metric_type: str, request: pai_studio_20220112_models.GetQuotaNodeMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaNodeMetricsResponse: + """ + @summary 资源配额内节点指标 + + @param request: GetQuotaNodeMetricsRequest + @return: GetQuotaNodeMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_quota_node_metrics_with_options(quota_id, metric_type, request, headers, runtime) @@ -3247,6 +5048,12 @@ async def get_quota_node_metrics_async( metric_type: str, request: pai_studio_20220112_models.GetQuotaNodeMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaNodeMetricsResponse: + """ + @summary 资源配额内节点指标 + + @param request: GetQuotaNodeMetricsRequest + @return: GetQuotaNodeMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_quota_node_metrics_with_options_async(quota_id, metric_type, request, headers, runtime) @@ -3258,14 +5065,34 @@ def get_quota_node_view_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaNodeViewMetricsResponse: + """ + @summary 获取资源配额内节点实时的性能指标 + + @param request: GetQuotaNodeViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaNodeViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id + if not UtilClient.is_unset(request.node_status): + query['NodeStatus'] = request.node_status + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.order_status): + query['OrderStatus'] = request.order_status if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): 
query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.resource_group_id): + query['ResourceGroupId'] = request.resource_group_id + if not UtilClient.is_unset(request.self_only): + query['SelfOnly'] = request.self_only + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by if not UtilClient.is_unset(request.time_step): query['TimeStep'] = request.time_step if not UtilClient.is_unset(request.workspace_id): @@ -3297,14 +5124,34 @@ async def get_quota_node_view_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaNodeViewMetricsResponse: + """ + @summary 获取资源配额内节点实时的性能指标 + + @param request: GetQuotaNodeViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaNodeViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.node_id): query['NodeId'] = request.node_id + if not UtilClient.is_unset(request.node_status): + query['NodeStatus'] = request.node_status + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.order_status): + query['OrderStatus'] = request.order_status if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.resource_group_id): + query['ResourceGroupId'] = request.resource_group_id + if not UtilClient.is_unset(request.self_only): + query['SelfOnly'] = request.self_only + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by if not UtilClient.is_unset(request.time_step): query['TimeStep'] = request.time_step if not UtilClient.is_unset(request.workspace_id): @@ -3334,6 +5181,12 @@ def get_quota_node_view_metrics( quota_id: str, request: 
pai_studio_20220112_models.GetQuotaNodeViewMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaNodeViewMetricsResponse: + """ + @summary 获取资源配额内节点实时的性能指标 + + @param request: GetQuotaNodeViewMetricsRequest + @return: GetQuotaNodeViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_quota_node_view_metrics_with_options(quota_id, request, headers, runtime) @@ -3343,10 +5196,164 @@ async def get_quota_node_view_metrics_async( quota_id: str, request: pai_studio_20220112_models.GetQuotaNodeViewMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaNodeViewMetricsResponse: + """ + @summary 获取资源配额内节点实时的性能指标 + + @param request: GetQuotaNodeViewMetricsRequest + @return: GetQuotaNodeViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_quota_node_view_metrics_with_options_async(quota_id, request, headers, runtime) + def get_quota_queue_info_with_options( + self, + quota_id: str, + request: pai_studio_20220112_models.GetQuotaQueueInfoRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetQuotaQueueInfoResponse: + """ + @summary 您可以通过 GetQuotaQueueInfo得到使用当前Quota的实例的排队信息。 + + @param request: GetQuotaQueueInfoRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaQueueInfoResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.before_workload_id): + query['BeforeWorkloadId'] = request.before_workload_id + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.show_own): + query['ShowOwn'] = request.show_own + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + 
if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.sub_quota_ids): + query['SubQuotaIds'] = request.sub_quota_ids + if not UtilClient.is_unset(request.workload_ids): + query['WorkloadIds'] = request.workload_ids + if not UtilClient.is_unset(request.workload_type): + query['WorkloadType'] = request.workload_type + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetQuotaQueueInfo', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/queueinfos', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetQuotaQueueInfoResponse(), + self.call_api(params, req, runtime) + ) + + async def get_quota_queue_info_with_options_async( + self, + quota_id: str, + request: pai_studio_20220112_models.GetQuotaQueueInfoRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetQuotaQueueInfoResponse: + """ + @summary 您可以通过 GetQuotaQueueInfo得到使用当前Quota的实例的排队信息。 + + @param request: GetQuotaQueueInfoRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaQueueInfoResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.before_workload_id): + query['BeforeWorkloadId'] = request.before_workload_id + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.show_own): + 
query['ShowOwn'] = request.show_own + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.sub_quota_ids): + query['SubQuotaIds'] = request.sub_quota_ids + if not UtilClient.is_unset(request.workload_ids): + query['WorkloadIds'] = request.workload_ids + if not UtilClient.is_unset(request.workload_type): + query['WorkloadType'] = request.workload_type + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetQuotaQueueInfo', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/queueinfos', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetQuotaQueueInfoResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_quota_queue_info( + self, + quota_id: str, + request: pai_studio_20220112_models.GetQuotaQueueInfoRequest, + ) -> pai_studio_20220112_models.GetQuotaQueueInfoResponse: + """ + @summary 您可以通过 GetQuotaQueueInfo得到使用当前Quota的实例的排队信息。 + + @param request: GetQuotaQueueInfoRequest + @return: GetQuotaQueueInfoResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_quota_queue_info_with_options(quota_id, request, headers, runtime) + + async def get_quota_queue_info_async( + self, + quota_id: str, + request: pai_studio_20220112_models.GetQuotaQueueInfoRequest, + ) -> pai_studio_20220112_models.GetQuotaQueueInfoResponse: + """ + @summary 您可以通过 GetQuotaQueueInfo得到使用当前Quota的实例的排队信息。 + + @param request: GetQuotaQueueInfoRequest + @return: GetQuotaQueueInfoResponse + """ + runtime = util_models.RuntimeOptions() + headers = 
{} + return await self.get_quota_queue_info_with_options_async(quota_id, request, headers, runtime) + def get_quota_range_user_view_metrics_with_options( self, quota_id: str, @@ -3354,6 +5361,14 @@ def get_quota_range_user_view_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse: + """ + @summary 获取资源配额用户视图的历史资源使用情况 + + @param request: GetQuotaRangeUserViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaRangeUserViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3377,10 +5392,136 @@ def get_quota_range_user_view_metrics_with_options( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetQuotaRangeUserViewMetrics', + action='GetQuotaRangeUserViewMetrics', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/rangeusermetrics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse(), + self.call_api(params, req, runtime) + ) + + async def get_quota_range_user_view_metrics_with_options_async( + self, + quota_id: str, + request: pai_studio_20220112_models.GetQuotaRangeUserViewMetricsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse: + """ + @summary 获取资源配额用户视图的历史资源使用情况 + + @param request: GetQuotaRangeUserViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaRangeUserViewMetricsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not 
UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + if not UtilClient.is_unset(request.user_id): + query['UserId'] = request.user_id + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetQuotaRangeUserViewMetrics', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/rangeusermetrics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_quota_range_user_view_metrics( + self, + quota_id: str, + request: pai_studio_20220112_models.GetQuotaRangeUserViewMetricsRequest, + ) -> pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse: + """ + @summary 获取资源配额用户视图的历史资源使用情况 + + @param request: GetQuotaRangeUserViewMetricsRequest + @return: GetQuotaRangeUserViewMetricsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_quota_range_user_view_metrics_with_options(quota_id, request, headers, runtime) + + async def get_quota_range_user_view_metrics_async( + self, + quota_id: str, + request: pai_studio_20220112_models.GetQuotaRangeUserViewMetricsRequest, + ) -> pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse: + """ + @summary 获取资源配额用户视图的历史资源使用情况 + + @param request: 
GetQuotaRangeUserViewMetricsRequest + @return: GetQuotaRangeUserViewMetricsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_quota_range_user_view_metrics_with_options_async(quota_id, request, headers, runtime) + + def get_quota_topo_with_options( + self, + quota_id: str, + request: pai_studio_20220112_models.GetQuotaTopoRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetQuotaTopoResponse: + """ + @summary 获取Quota拓扑信息 + + @param request: GetQuotaTopoRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaTopoResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.depth): + query['Depth'] = request.depth + if not UtilClient.is_unset(request.show_own_workloads): + query['ShowOwnWorkloads'] = request.show_own_workloads + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetQuotaTopo', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/rangeusermetrics', + pathname=f'/api/v1/quotas/%5BQuotaId%5D/topo', method='GET', auth_type='AK', style='ROA', @@ -3388,44 +5529,42 @@ def get_quota_range_user_view_metrics_with_options( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse(), + pai_studio_20220112_models.GetQuotaTopoResponse(), self.call_api(params, req, runtime) ) - async def get_quota_range_user_view_metrics_with_options_async( + async def get_quota_topo_with_options_async( self, quota_id: str, - request: pai_studio_20220112_models.GetQuotaRangeUserViewMetricsRequest, - headers: Dict[str, str], - runtime: util_models.RuntimeOptions, - ) -> 
pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse: + request: pai_studio_20220112_models.GetQuotaTopoRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetQuotaTopoResponse: + """ + @summary 获取Quota拓扑信息 + + @param request: GetQuotaTopoRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaTopoResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.end_time): - query['EndTime'] = request.end_time - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.start_time): - query['StartTime'] = request.start_time - if not UtilClient.is_unset(request.user_id): - query['UserId'] = request.user_id - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.depth): + query['Depth'] = request.depth + if not UtilClient.is_unset(request.show_own_workloads): + query['ShowOwnWorkloads'] = request.show_own_workloads + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='GetQuotaRangeUserViewMetrics', + action='GetQuotaTopo', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/rangeusermetrics', + pathname=f'/api/v1/quotas/%5BQuotaId%5D/topo', method='GET', auth_type='AK', style='ROA', @@ -3433,27 +5572,39 @@ async def get_quota_range_user_view_metrics_with_options_async( body_type='json' ) 
return TeaCore.from_map( - pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse(), + pai_studio_20220112_models.GetQuotaTopoResponse(), await self.call_api_async(params, req, runtime) ) - def get_quota_range_user_view_metrics( + def get_quota_topo( self, quota_id: str, - request: pai_studio_20220112_models.GetQuotaRangeUserViewMetricsRequest, - ) -> pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse: + request: pai_studio_20220112_models.GetQuotaTopoRequest, + ) -> pai_studio_20220112_models.GetQuotaTopoResponse: + """ + @summary 获取Quota拓扑信息 + + @param request: GetQuotaTopoRequest + @return: GetQuotaTopoResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_quota_range_user_view_metrics_with_options(quota_id, request, headers, runtime) + return self.get_quota_topo_with_options(quota_id, request, headers, runtime) - async def get_quota_range_user_view_metrics_async( + async def get_quota_topo_async( self, quota_id: str, - request: pai_studio_20220112_models.GetQuotaRangeUserViewMetricsRequest, - ) -> pai_studio_20220112_models.GetQuotaRangeUserViewMetricsResponse: + request: pai_studio_20220112_models.GetQuotaTopoRequest, + ) -> pai_studio_20220112_models.GetQuotaTopoResponse: + """ + @summary 获取Quota拓扑信息 + + @param request: GetQuotaTopoRequest + @return: GetQuotaTopoResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_quota_range_user_view_metrics_with_options_async(quota_id, request, headers, runtime) + return await self.get_quota_topo_with_options_async(quota_id, request, headers, runtime) def get_quota_user_view_metrics_with_options( self, @@ -3462,6 +5613,14 @@ def get_quota_user_view_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaUserViewMetricsResponse: + """ + @summary 获取用户视图的资源使用情况 + + @param request: GetQuotaUserViewMetricsRequest + @param headers: map + @param runtime: runtime options for 
this request RuntimeOptions + @return: GetQuotaUserViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.order): @@ -3505,6 +5664,14 @@ async def get_quota_user_view_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetQuotaUserViewMetricsResponse: + """ + @summary 获取用户视图的资源使用情况 + + @param request: GetQuotaUserViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetQuotaUserViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.order): @@ -3546,6 +5713,12 @@ def get_quota_user_view_metrics( quota_id: str, request: pai_studio_20220112_models.GetQuotaUserViewMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaUserViewMetricsResponse: + """ + @summary 获取用户视图的资源使用情况 + + @param request: GetQuotaUserViewMetricsRequest + @return: GetQuotaUserViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_quota_user_view_metrics_with_options(quota_id, request, headers, runtime) @@ -3555,6 +5728,12 @@ async def get_quota_user_view_metrics_async( quota_id: str, request: pai_studio_20220112_models.GetQuotaUserViewMetricsRequest, ) -> pai_studio_20220112_models.GetQuotaUserViewMetricsResponse: + """ + @summary 获取用户视图的资源使用情况 + + @param request: GetQuotaUserViewMetricsRequest + @return: GetQuotaUserViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_quota_user_view_metrics_with_options_async(quota_id, request, headers, runtime) @@ -3566,6 +5745,14 @@ def get_range_user_view_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetRangeUserViewMetricsResponse: + """ + @summary 获取按照user统计的性能指标的历史数据 + + @param request: GetRangeUserViewMetricsRequest + @param headers: map + @param runtime: runtime 
options for this request RuntimeOptions + @return: GetRangeUserViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3611,6 +5798,14 @@ async def get_range_user_view_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetRangeUserViewMetricsResponse: + """ + @summary 获取按照user统计的性能指标的历史数据 + + @param request: GetRangeUserViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetRangeUserViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3654,6 +5849,12 @@ def get_range_user_view_metrics( resource_group_id: str, request: pai_studio_20220112_models.GetRangeUserViewMetricsRequest, ) -> pai_studio_20220112_models.GetRangeUserViewMetricsResponse: + """ + @summary 获取按照user统计的性能指标的历史数据 + + @param request: GetRangeUserViewMetricsRequest + @return: GetRangeUserViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_range_user_view_metrics_with_options(resource_group_id, request, headers, runtime) @@ -3663,6 +5864,12 @@ async def get_range_user_view_metrics_async( resource_group_id: str, request: pai_studio_20220112_models.GetRangeUserViewMetricsRequest, ) -> pai_studio_20220112_models.GetRangeUserViewMetricsResponse: + """ + @summary 获取按照user统计的性能指标的历史数据 + + @param request: GetRangeUserViewMetricsRequest + @return: GetRangeUserViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_range_user_view_metrics_with_options_async(resource_group_id, request, headers, runtime) @@ -3670,14 +5877,28 @@ async def get_range_user_view_metrics_async( def get_resource_group_with_options( self, resource_group_id: str, - request: pai_studio_20220112_models.GetResourceGroupRequest, + tmp_req: pai_studio_20220112_models.GetResourceGroupRequest, 
headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetResourceGroupResponse: - UtilClient.validate_model(request) + """ + @summary get resource group by group id + + @param tmp_req: GetResourceGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceGroupResponse + """ + UtilClient.validate_model(tmp_req) + request = pai_studio_20220112_models.GetResourceGroupShrinkRequest() + OpenApiUtilClient.convert(tmp_req, request) + if not UtilClient.is_unset(tmp_req.tag): + request.tag_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.tag, 'Tag', 'json') query = {} if not UtilClient.is_unset(request.is_aiworkspace_data_enabled): query['IsAIWorkspaceDataEnabled'] = request.is_aiworkspace_data_enabled + if not UtilClient.is_unset(request.tag_shrink): + query['Tag'] = request.tag_shrink req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) @@ -3701,14 +5922,28 @@ def get_resource_group_with_options( async def get_resource_group_with_options_async( self, resource_group_id: str, - request: pai_studio_20220112_models.GetResourceGroupRequest, + tmp_req: pai_studio_20220112_models.GetResourceGroupRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetResourceGroupResponse: - UtilClient.validate_model(request) + """ + @summary get resource group by group id + + @param tmp_req: GetResourceGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceGroupResponse + """ + UtilClient.validate_model(tmp_req) + request = pai_studio_20220112_models.GetResourceGroupShrinkRequest() + OpenApiUtilClient.convert(tmp_req, request) + if not UtilClient.is_unset(tmp_req.tag): + request.tag_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.tag, 'Tag', 'json') query = {} if not 
UtilClient.is_unset(request.is_aiworkspace_data_enabled): query['IsAIWorkspaceDataEnabled'] = request.is_aiworkspace_data_enabled + if not UtilClient.is_unset(request.tag_shrink): + query['Tag'] = request.tag_shrink req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) @@ -3734,6 +5969,12 @@ def get_resource_group( resource_group_id: str, request: pai_studio_20220112_models.GetResourceGroupRequest, ) -> pai_studio_20220112_models.GetResourceGroupResponse: + """ + @summary get resource group by group id + + @param request: GetResourceGroupRequest + @return: GetResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_resource_group_with_options(resource_group_id, request, headers, runtime) @@ -3743,6 +5984,12 @@ async def get_resource_group_async( resource_group_id: str, request: pai_studio_20220112_models.GetResourceGroupRequest, ) -> pai_studio_20220112_models.GetResourceGroupResponse: + """ + @summary get resource group by group id + + @param request: GetResourceGroupRequest + @return: GetResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_resource_group_with_options_async(resource_group_id, request, headers, runtime) @@ -3751,11 +5998,29 @@ def get_resource_group_machine_group_with_options( self, machine_group_id: str, resource_group_id: str, + tmp_req: pai_studio_20220112_models.GetResourceGroupMachineGroupRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetResourceGroupMachineGroupResponse: + """ + @summary get machine group + + @param tmp_req: GetResourceGroupMachineGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceGroupMachineGroupResponse + """ + UtilClient.validate_model(tmp_req) + request = pai_studio_20220112_models.GetResourceGroupMachineGroupShrinkRequest() + OpenApiUtilClient.convert(tmp_req, request) + 
if not UtilClient.is_unset(tmp_req.tag): + request.tag_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.tag, 'Tag', 'json') + query = {} + if not UtilClient.is_unset(request.tag_shrink): + query['Tag'] = request.tag_shrink req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='GetResourceGroupMachineGroup', @@ -3777,11 +6042,29 @@ async def get_resource_group_machine_group_with_options_async( self, machine_group_id: str, resource_group_id: str, + tmp_req: pai_studio_20220112_models.GetResourceGroupMachineGroupRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetResourceGroupMachineGroupResponse: + """ + @summary get machine group + + @param tmp_req: GetResourceGroupMachineGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceGroupMachineGroupResponse + """ + UtilClient.validate_model(tmp_req) + request = pai_studio_20220112_models.GetResourceGroupMachineGroupShrinkRequest() + OpenApiUtilClient.convert(tmp_req, request) + if not UtilClient.is_unset(tmp_req.tag): + request.tag_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.tag, 'Tag', 'json') + query = {} + if not UtilClient.is_unset(request.tag_shrink): + query['Tag'] = request.tag_shrink req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( action='GetResourceGroupMachineGroup', @@ -3803,19 +6086,33 @@ def get_resource_group_machine_group( self, machine_group_id: str, resource_group_id: str, + request: pai_studio_20220112_models.GetResourceGroupMachineGroupRequest, ) -> pai_studio_20220112_models.GetResourceGroupMachineGroupResponse: + """ + @summary get machine group + + @param request: GetResourceGroupMachineGroupRequest + @return: 
GetResourceGroupMachineGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.get_resource_group_machine_group_with_options(machine_group_id, resource_group_id, headers, runtime) + return self.get_resource_group_machine_group_with_options(machine_group_id, resource_group_id, request, headers, runtime) async def get_resource_group_machine_group_async( self, machine_group_id: str, resource_group_id: str, + request: pai_studio_20220112_models.GetResourceGroupMachineGroupRequest, ) -> pai_studio_20220112_models.GetResourceGroupMachineGroupResponse: + """ + @summary get machine group + + @param request: GetResourceGroupMachineGroupRequest + @return: GetResourceGroupMachineGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_resource_group_machine_group_with_options_async(machine_group_id, resource_group_id, headers, runtime) + return await self.get_resource_group_machine_group_with_options_async(machine_group_id, resource_group_id, request, headers, runtime) def get_resource_group_metrics_with_options( self, @@ -3825,6 +6122,14 @@ def get_resource_group_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetResourceGroupMetricsResponse: + """ + @summary 获取资源组卡型的使用率 + + @param request: GetResourceGroupMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceGroupMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3863,6 +6168,14 @@ async def get_resource_group_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetResourceGroupMetricsResponse: + """ + @summary 获取资源组卡型的使用率 + + @param request: GetResourceGroupMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: 
GetResourceGroupMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -3899,6 +6212,12 @@ def get_resource_group_metrics( metric_type: str, request: pai_studio_20220112_models.GetResourceGroupMetricsRequest, ) -> pai_studio_20220112_models.GetResourceGroupMetricsResponse: + """ + @summary 获取资源组卡型的使用率 + + @param request: GetResourceGroupMetricsRequest + @return: GetResourceGroupMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_resource_group_metrics_with_options(resource_group_id, metric_type, request, headers, runtime) @@ -3909,6 +6228,12 @@ async def get_resource_group_metrics_async( metric_type: str, request: pai_studio_20220112_models.GetResourceGroupMetricsRequest, ) -> pai_studio_20220112_models.GetResourceGroupMetricsResponse: + """ + @summary 获取资源组卡型的使用率 + + @param request: GetResourceGroupMetricsRequest + @return: GetResourceGroupMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_resource_group_metrics_with_options_async(resource_group_id, metric_type, request, headers, runtime) @@ -3919,6 +6244,14 @@ def get_resource_group_request_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetResourceGroupRequestResponse: + """ + @summary get resource group requested resource by resource group id + + @param request: GetResourceGroupRequestRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceGroupRequestResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.pod_status): @@ -3951,6 +6284,14 @@ async def get_resource_group_request_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetResourceGroupRequestResponse: + """ + @summary get resource group requested resource by resource group id + + @param 
request: GetResourceGroupRequestRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceGroupRequestResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.pod_status): @@ -3981,6 +6322,12 @@ def get_resource_group_request( self, request: pai_studio_20220112_models.GetResourceGroupRequestRequest, ) -> pai_studio_20220112_models.GetResourceGroupRequestResponse: + """ + @summary get resource group requested resource by resource group id + + @param request: GetResourceGroupRequestRequest + @return: GetResourceGroupRequestResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_resource_group_request_with_options(request, headers, runtime) @@ -3989,6 +6336,12 @@ async def get_resource_group_request_async( self, request: pai_studio_20220112_models.GetResourceGroupRequestRequest, ) -> pai_studio_20220112_models.GetResourceGroupRequestResponse: + """ + @summary get resource group requested resource by resource group id + + @param request: GetResourceGroupRequestRequest + @return: GetResourceGroupRequestResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_resource_group_request_with_options_async(request, headers, runtime) @@ -3999,6 +6352,14 @@ def get_resource_group_total_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetResourceGroupTotalResponse: + """ + @summary get resource group total resource by group id + + @param request: GetResourceGroupTotalRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceGroupTotalResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.resource_group_id): @@ -4029,6 +6390,14 @@ async def get_resource_group_total_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> 
pai_studio_20220112_models.GetResourceGroupTotalResponse: + """ + @summary get resource group total resource by group id + + @param request: GetResourceGroupTotalRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetResourceGroupTotalResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.resource_group_id): @@ -4057,6 +6426,12 @@ def get_resource_group_total( self, request: pai_studio_20220112_models.GetResourceGroupTotalRequest, ) -> pai_studio_20220112_models.GetResourceGroupTotalResponse: + """ + @summary get resource group total resource by group id + + @param request: GetResourceGroupTotalRequest + @return: GetResourceGroupTotalResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_resource_group_total_with_options(request, headers, runtime) @@ -4065,6 +6440,12 @@ async def get_resource_group_total_async( self, request: pai_studio_20220112_models.GetResourceGroupTotalRequest, ) -> pai_studio_20220112_models.GetResourceGroupTotalResponse: + """ + @summary get resource group total resource by group id + + @param request: GetResourceGroupTotalRequest + @return: GetResourceGroupTotalResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_resource_group_total_with_options_async(request, headers, runtime) @@ -4075,6 +6456,13 @@ def get_service_identity_role_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetServiceIdentityRoleResponse: + """ + @summary 获取服务认证角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetServiceIdentityRoleResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -4100,6 +6488,13 @@ async def get_service_identity_role_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetServiceIdentityRoleResponse: 
+ """ + @summary 获取服务认证角色 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetServiceIdentityRoleResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -4123,6 +6518,11 @@ def get_service_identity_role( self, role_name: str, ) -> pai_studio_20220112_models.GetServiceIdentityRoleResponse: + """ + @summary 获取服务认证角色 + + @return: GetServiceIdentityRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_service_identity_role_with_options(role_name, headers, runtime) @@ -4131,16 +6531,247 @@ async def get_service_identity_role_async( self, role_name: str, ) -> pai_studio_20220112_models.GetServiceIdentityRoleResponse: + """ + @summary 获取服务认证角色 + + @return: GetServiceIdentityRoleResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_service_identity_role_with_options_async(role_name, headers, runtime) + def get_spot_price_history_with_options( + self, + instance_type: str, + request: pai_studio_20220112_models.GetSpotPriceHistoryRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetSpotPriceHistoryResponse: + """ + @summary 获取抢占式实例历史价格 + + @param request: GetSpotPriceHistoryRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetSpotPriceHistoryResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + req = 
open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetSpotPriceHistory', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/spots/{OpenApiUtilClient.get_encode_param(instance_type)}/pricehistory', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetSpotPriceHistoryResponse(), + self.call_api(params, req, runtime) + ) + + async def get_spot_price_history_with_options_async( + self, + instance_type: str, + request: pai_studio_20220112_models.GetSpotPriceHistoryRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetSpotPriceHistoryResponse: + """ + @summary 获取抢占式实例历史价格 + + @param request: GetSpotPriceHistoryRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetSpotPriceHistoryResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetSpotPriceHistory', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/spots/{OpenApiUtilClient.get_encode_param(instance_type)}/pricehistory', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return 
TeaCore.from_map( + pai_studio_20220112_models.GetSpotPriceHistoryResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_spot_price_history( + self, + instance_type: str, + request: pai_studio_20220112_models.GetSpotPriceHistoryRequest, + ) -> pai_studio_20220112_models.GetSpotPriceHistoryResponse: + """ + @summary 获取抢占式实例历史价格 + + @param request: GetSpotPriceHistoryRequest + @return: GetSpotPriceHistoryResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_spot_price_history_with_options(instance_type, request, headers, runtime) + + async def get_spot_price_history_async( + self, + instance_type: str, + request: pai_studio_20220112_models.GetSpotPriceHistoryRequest, + ) -> pai_studio_20220112_models.GetSpotPriceHistoryResponse: + """ + @summary 获取抢占式实例历史价格 + + @param request: GetSpotPriceHistoryRequest + @return: GetSpotPriceHistoryResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_spot_price_history_with_options_async(instance_type, request, headers, runtime) + + def get_spot_stock_preview_with_options( + self, + instance_type: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetSpotStockPreviewResponse: + """ + @summary 获取抢占式实例的库存概览 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetSpotStockPreviewResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetSpotStockPreview', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/spots/{OpenApiUtilClient.get_encode_param(instance_type)}/stockpreview', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetSpotStockPreviewResponse(), + self.call_api(params, req, runtime) + ) + + async def get_spot_stock_preview_with_options_async( + 
self, + instance_type: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetSpotStockPreviewResponse: + """ + @summary 获取抢占式实例的库存概览 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetSpotStockPreviewResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='GetSpotStockPreview', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/spots/{OpenApiUtilClient.get_encode_param(instance_type)}/stockpreview', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetSpotStockPreviewResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_spot_stock_preview( + self, + instance_type: str, + ) -> pai_studio_20220112_models.GetSpotStockPreviewResponse: + """ + @summary 获取抢占式实例的库存概览 + + @return: GetSpotStockPreviewResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.get_spot_stock_preview_with_options(instance_type, headers, runtime) + + async def get_spot_stock_preview_async( + self, + instance_type: str, + ) -> pai_studio_20220112_models.GetSpotStockPreviewResponse: + """ + @summary 获取抢占式实例的库存概览 + + @return: GetSpotStockPreviewResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_spot_stock_preview_with_options_async(instance_type, headers, runtime) + def get_token_with_options( self, request: pai_studio_20220112_models.GetTokenRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetTokenResponse: + """ + @summary 调用GetToken获取临时鉴权信息 + + @param request: GetTokenRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetTokenResponse + """ UtilClient.validate_model(request) query = {} if not 
UtilClient.is_unset(request.expire_time): @@ -4173,6 +6804,14 @@ async def get_token_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetTokenResponse: + """ + @summary 调用GetToken获取临时鉴权信息 + + @param request: GetTokenRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetTokenResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.expire_time): @@ -4203,6 +6842,12 @@ def get_token( self, request: pai_studio_20220112_models.GetTokenRequest, ) -> pai_studio_20220112_models.GetTokenResponse: + """ + @summary 调用GetToken获取临时鉴权信息 + + @param request: GetTokenRequest + @return: GetTokenResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_token_with_options(request, headers, runtime) @@ -4211,6 +6856,12 @@ async def get_token_async( self, request: pai_studio_20220112_models.GetTokenRequest, ) -> pai_studio_20220112_models.GetTokenResponse: + """ + @summary 调用GetToken获取临时鉴权信息 + + @param request: GetTokenRequest + @return: GetTokenResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_token_with_options_async(request, headers, runtime) @@ -4222,6 +6873,14 @@ def get_training_job_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetTrainingJobResponse: + """ + @summary 获取TrainingJob的详情 + + @param request: GetTrainingJobRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetTrainingJobResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.token): @@ -4253,6 +6912,14 @@ async def get_training_job_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetTrainingJobResponse: + """ + @summary 获取TrainingJob的详情 + + @param request: GetTrainingJobRequest + 
@param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetTrainingJobResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.token): @@ -4282,18 +6949,138 @@ def get_training_job( training_job_id: str, request: pai_studio_20220112_models.GetTrainingJobRequest, ) -> pai_studio_20220112_models.GetTrainingJobResponse: + """ + @summary 获取TrainingJob的详情 + + @param request: GetTrainingJobRequest + @return: GetTrainingJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_training_job_with_options(training_job_id, request, headers, runtime) - async def get_training_job_async( + async def get_training_job_async( + self, + training_job_id: str, + request: pai_studio_20220112_models.GetTrainingJobRequest, + ) -> pai_studio_20220112_models.GetTrainingJobResponse: + """ + @summary 获取TrainingJob的详情 + + @param request: GetTrainingJobRequest + @return: GetTrainingJobResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.get_training_job_with_options_async(training_job_id, request, headers, runtime) + + def get_training_job_error_info_with_options( + self, + training_job_id: str, + request: pai_studio_20220112_models.GetTrainingJobErrorInfoRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetTrainingJobErrorInfoResponse: + """ + @summary 获取Training Job的算法错误信息 + + @param request: GetTrainingJobErrorInfoRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetTrainingJobErrorInfoResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.token): + query['Token'] = request.token + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetTrainingJobErrorInfo', + version='2022-01-12', + 
protocol='HTTPS', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/errorinfo', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetTrainingJobErrorInfoResponse(), + self.call_api(params, req, runtime) + ) + + async def get_training_job_error_info_with_options_async( + self, + training_job_id: str, + request: pai_studio_20220112_models.GetTrainingJobErrorInfoRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.GetTrainingJobErrorInfoResponse: + """ + @summary 获取Training Job的算法错误信息 + + @param request: GetTrainingJobErrorInfoRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetTrainingJobErrorInfoResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.token): + query['Token'] = request.token + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='GetTrainingJobErrorInfo', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/errorinfo', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.GetTrainingJobErrorInfoResponse(), + await self.call_api_async(params, req, runtime) + ) + + def get_training_job_error_info( + self, + training_job_id: str, + request: pai_studio_20220112_models.GetTrainingJobErrorInfoRequest, + ) -> pai_studio_20220112_models.GetTrainingJobErrorInfoResponse: + """ + @summary 获取Training Job的算法错误信息 + + @param request: GetTrainingJobErrorInfoRequest + @return: GetTrainingJobErrorInfoResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return 
self.get_training_job_error_info_with_options(training_job_id, request, headers, runtime) + + async def get_training_job_error_info_async( self, training_job_id: str, - request: pai_studio_20220112_models.GetTrainingJobRequest, - ) -> pai_studio_20220112_models.GetTrainingJobResponse: + request: pai_studio_20220112_models.GetTrainingJobErrorInfoRequest, + ) -> pai_studio_20220112_models.GetTrainingJobErrorInfoResponse: + """ + @summary 获取Training Job的算法错误信息 + + @param request: GetTrainingJobErrorInfoRequest + @return: GetTrainingJobErrorInfoResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.get_training_job_with_options_async(training_job_id, request, headers, runtime) + return await self.get_training_job_error_info_with_options_async(training_job_id, request, headers, runtime) def get_training_job_latest_metrics_with_options( self, @@ -4302,6 +7089,14 @@ def get_training_job_latest_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetTrainingJobLatestMetricsResponse: + """ + @summary 获取TrainingJob最近的Metrics + + @param request: GetTrainingJobLatestMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetTrainingJobLatestMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.names): @@ -4335,6 +7130,14 @@ async def get_training_job_latest_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetTrainingJobLatestMetricsResponse: + """ + @summary 获取TrainingJob最近的Metrics + + @param request: GetTrainingJobLatestMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetTrainingJobLatestMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.names): @@ -4366,6 +7169,12 @@ def 
get_training_job_latest_metrics( training_job_id: str, request: pai_studio_20220112_models.GetTrainingJobLatestMetricsRequest, ) -> pai_studio_20220112_models.GetTrainingJobLatestMetricsResponse: + """ + @summary 获取TrainingJob最近的Metrics + + @param request: GetTrainingJobLatestMetricsRequest + @return: GetTrainingJobLatestMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_training_job_latest_metrics_with_options(training_job_id, request, headers, runtime) @@ -4375,6 +7184,12 @@ async def get_training_job_latest_metrics_async( training_job_id: str, request: pai_studio_20220112_models.GetTrainingJobLatestMetricsRequest, ) -> pai_studio_20220112_models.GetTrainingJobLatestMetricsResponse: + """ + @summary 获取TrainingJob最近的Metrics + + @param request: GetTrainingJobLatestMetricsRequest + @return: GetTrainingJobLatestMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_training_job_latest_metrics_with_options_async(training_job_id, request, headers, runtime) @@ -4386,6 +7201,14 @@ def get_user_view_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetUserViewMetricsResponse: + """ + @summary get user view metrics + + @param request: GetUserViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetUserViewMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.order): @@ -4429,6 +7252,14 @@ async def get_user_view_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.GetUserViewMetricsResponse: + """ + @summary get user view metrics + + @param request: GetUserViewMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: GetUserViewMetricsResponse + """ UtilClient.validate_model(request) query = {} 
if not UtilClient.is_unset(request.order): @@ -4470,6 +7301,12 @@ def get_user_view_metrics( resource_group_id: str, request: pai_studio_20220112_models.GetUserViewMetricsRequest, ) -> pai_studio_20220112_models.GetUserViewMetricsResponse: + """ + @summary get user view metrics + + @param request: GetUserViewMetricsRequest + @return: GetUserViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.get_user_view_metrics_with_options(resource_group_id, request, headers, runtime) @@ -4479,6 +7316,12 @@ async def get_user_view_metrics_async( resource_group_id: str, request: pai_studio_20220112_models.GetUserViewMetricsRequest, ) -> pai_studio_20220112_models.GetUserViewMetricsResponse: + """ + @summary get user view metrics + + @param request: GetUserViewMetricsRequest + @return: GetUserViewMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.get_user_view_metrics_with_options_async(resource_group_id, request, headers, runtime) @@ -4489,6 +7332,14 @@ def list_ai4dserivces_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListAI4DSerivcesResponse: + """ + @summary 获取AI4D服务列表 + + @param request: ListAI4DSerivcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListAI4DSerivcesResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.service_type): @@ -4521,6 +7372,14 @@ async def list_ai4dserivces_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListAI4DSerivcesResponse: + """ + @summary 获取AI4D服务列表 + + @param request: ListAI4DSerivcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListAI4DSerivcesResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.service_type): @@ -4551,6 +7410,12 @@ 
def list_ai4dserivces( self, request: pai_studio_20220112_models.ListAI4DSerivcesRequest, ) -> pai_studio_20220112_models.ListAI4DSerivcesResponse: + """ + @summary 获取AI4D服务列表 + + @param request: ListAI4DSerivcesRequest + @return: ListAI4DSerivcesResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_ai4dserivces_with_options(request, headers, runtime) @@ -4559,10 +7424,124 @@ async def list_ai4dserivces_async( self, request: pai_studio_20220112_models.ListAI4DSerivcesRequest, ) -> pai_studio_20220112_models.ListAI4DSerivcesResponse: + """ + @summary 获取AI4D服务列表 + + @param request: ListAI4DSerivcesRequest + @return: ListAI4DSerivcesResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_ai4dserivces_with_options_async(request, headers, runtime) + def list_ai4dservice_templates_with_options( + self, + request: pai_studio_20220112_models.ListAI4DServiceTemplatesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListAI4DServiceTemplatesResponse: + """ + @summary 获取AI4D服务模板 + + @param request: ListAI4DServiceTemplatesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListAI4DServiceTemplatesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.service_type): + query['ServiceType'] = request.service_type + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListAI4DServiceTemplates', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/ai4d/servicetemplates', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListAI4DServiceTemplatesResponse(), + 
self.call_api(params, req, runtime) + ) + + async def list_ai4dservice_templates_with_options_async( + self, + request: pai_studio_20220112_models.ListAI4DServiceTemplatesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListAI4DServiceTemplatesResponse: + """ + @summary 获取AI4D服务模板 + + @param request: ListAI4DServiceTemplatesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListAI4DServiceTemplatesResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.service_type): + query['ServiceType'] = request.service_type + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListAI4DServiceTemplates', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/ai4d/servicetemplates', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListAI4DServiceTemplatesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_ai4dservice_templates( + self, + request: pai_studio_20220112_models.ListAI4DServiceTemplatesRequest, + ) -> pai_studio_20220112_models.ListAI4DServiceTemplatesResponse: + """ + @summary 获取AI4D服务模板 + + @param request: ListAI4DServiceTemplatesRequest + @return: ListAI4DServiceTemplatesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_ai4dservice_templates_with_options(request, headers, runtime) + + async def list_ai4dservice_templates_async( + self, + request: pai_studio_20220112_models.ListAI4DServiceTemplatesRequest, + ) -> pai_studio_20220112_models.ListAI4DServiceTemplatesResponse: + """ + @summary 获取AI4D服务模板 + + @param request: 
ListAI4DServiceTemplatesRequest + @return: ListAI4DServiceTemplatesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_ai4dservice_templates_with_options_async(request, headers, runtime) + def list_algorithm_versions_with_options( self, algorithm_id: str, @@ -4570,6 +7549,14 @@ def list_algorithm_versions_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListAlgorithmVersionsResponse: + """ + @summary 获取算法的所有版本信息 + + @param request: ListAlgorithmVersionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListAlgorithmVersionsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.page_number): @@ -4603,6 +7590,14 @@ async def list_algorithm_versions_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListAlgorithmVersionsResponse: + """ + @summary 获取算法的所有版本信息 + + @param request: ListAlgorithmVersionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListAlgorithmVersionsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.page_number): @@ -4634,6 +7629,12 @@ def list_algorithm_versions( algorithm_id: str, request: pai_studio_20220112_models.ListAlgorithmVersionsRequest, ) -> pai_studio_20220112_models.ListAlgorithmVersionsResponse: + """ + @summary 获取算法的所有版本信息 + + @param request: ListAlgorithmVersionsRequest + @return: ListAlgorithmVersionsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_algorithm_versions_with_options(algorithm_id, request, headers, runtime) @@ -4643,6 +7644,12 @@ async def list_algorithm_versions_async( algorithm_id: str, request: pai_studio_20220112_models.ListAlgorithmVersionsRequest, ) -> pai_studio_20220112_models.ListAlgorithmVersionsResponse: + """ + 
@summary 获取算法的所有版本信息 + + @param request: ListAlgorithmVersionsRequest + @return: ListAlgorithmVersionsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_algorithm_versions_with_options_async(algorithm_id, request, headers, runtime) @@ -4653,6 +7660,14 @@ def list_algorithms_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListAlgorithmsResponse: + """ + @summary 获取算法列表 + + @param request: ListAlgorithmsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListAlgorithmsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.algorithm_id): @@ -4693,6 +7708,14 @@ async def list_algorithms_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListAlgorithmsResponse: + """ + @summary 获取算法列表 + + @param request: ListAlgorithmsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListAlgorithmsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.algorithm_id): @@ -4731,6 +7754,12 @@ def list_algorithms( self, request: pai_studio_20220112_models.ListAlgorithmsRequest, ) -> pai_studio_20220112_models.ListAlgorithmsResponse: + """ + @summary 获取算法列表 + + @param request: ListAlgorithmsRequest + @return: ListAlgorithmsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_algorithms_with_options(request, headers, runtime) @@ -4739,6 +7768,12 @@ async def list_algorithms_async( self, request: pai_studio_20220112_models.ListAlgorithmsRequest, ) -> pai_studio_20220112_models.ListAlgorithmsResponse: + """ + @summary 获取算法列表 + + @param request: ListAlgorithmsRequest + @return: ListAlgorithmsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await 
self.list_algorithms_with_options_async(request, headers, runtime) @@ -4749,6 +7784,14 @@ def list_component_version_snapshots_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListComponentVersionSnapshotsResponse: + """ + @summary 更新组件版本快照 + + @param request: ListComponentVersionSnapshotsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListComponentVersionSnapshotsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.component_id): @@ -4791,6 +7834,14 @@ async def list_component_version_snapshots_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListComponentVersionSnapshotsResponse: + """ + @summary 更新组件版本快照 + + @param request: ListComponentVersionSnapshotsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListComponentVersionSnapshotsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.component_id): @@ -4831,6 +7882,12 @@ def list_component_version_snapshots( self, request: pai_studio_20220112_models.ListComponentVersionSnapshotsRequest, ) -> pai_studio_20220112_models.ListComponentVersionSnapshotsResponse: + """ + @summary 更新组件版本快照 + + @param request: ListComponentVersionSnapshotsRequest + @return: ListComponentVersionSnapshotsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_component_version_snapshots_with_options(request, headers, runtime) @@ -4839,6 +7896,12 @@ async def list_component_version_snapshots_async( self, request: pai_studio_20220112_models.ListComponentVersionSnapshotsRequest, ) -> pai_studio_20220112_models.ListComponentVersionSnapshotsResponse: + """ + @summary 更新组件版本快照 + + @param request: ListComponentVersionSnapshotsRequest + @return: ListComponentVersionSnapshotsResponse + """ runtime = 
util_models.RuntimeOptions() headers = {} return await self.list_component_version_snapshots_with_options_async(request, headers, runtime) @@ -4850,6 +7913,14 @@ def list_component_versions_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListComponentVersionsResponse: + """ + @summary 获取组件版本列表 + + @param tmp_req: ListComponentVersionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListComponentVersionsResponse + """ UtilClient.validate_model(tmp_req) request = pai_studio_20220112_models.ListComponentVersionsShrinkRequest() OpenApiUtilClient.convert(tmp_req, request) @@ -4895,6 +7966,14 @@ async def list_component_versions_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListComponentVersionsResponse: + """ + @summary 获取组件版本列表 + + @param tmp_req: ListComponentVersionsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListComponentVersionsResponse + """ UtilClient.validate_model(tmp_req) request = pai_studio_20220112_models.ListComponentVersionsShrinkRequest() OpenApiUtilClient.convert(tmp_req, request) @@ -4938,6 +8017,12 @@ def list_component_versions( component_id: str, request: pai_studio_20220112_models.ListComponentVersionsRequest, ) -> pai_studio_20220112_models.ListComponentVersionsResponse: + """ + @summary 获取组件版本列表 + + @param request: ListComponentVersionsRequest + @return: ListComponentVersionsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_component_versions_with_options(component_id, request, headers, runtime) @@ -4947,6 +8032,12 @@ async def list_component_versions_async( component_id: str, request: pai_studio_20220112_models.ListComponentVersionsRequest, ) -> pai_studio_20220112_models.ListComponentVersionsResponse: + """ + @summary 获取组件版本列表 + + @param request: 
ListComponentVersionsRequest + @return: ListComponentVersionsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_component_versions_with_options_async(component_id, request, headers, runtime) @@ -4957,6 +8048,14 @@ def list_components_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListComponentsResponse: + """ + @summary 获取组件列表 + + @param tmp_req: ListComponentsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListComponentsResponse + """ UtilClient.validate_model(tmp_req) request = pai_studio_20220112_models.ListComponentsShrinkRequest() OpenApiUtilClient.convert(tmp_req, request) @@ -5005,45 +8104,299 @@ def list_components_with_options( async def list_components_with_options_async( self, - tmp_req: pai_studio_20220112_models.ListComponentsRequest, - headers: Dict[str, str], - runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.ListComponentsResponse: - UtilClient.validate_model(tmp_req) - request = pai_studio_20220112_models.ListComponentsShrinkRequest() - OpenApiUtilClient.convert(tmp_req, request) - if not UtilClient.is_unset(tmp_req.labels): - request.labels_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.labels, 'Labels', 'json') + tmp_req: pai_studio_20220112_models.ListComponentsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListComponentsResponse: + """ + @summary 获取组件列表 + + @param tmp_req: ListComponentsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListComponentsResponse + """ + UtilClient.validate_model(tmp_req) + request = pai_studio_20220112_models.ListComponentsShrinkRequest() + OpenApiUtilClient.convert(tmp_req, request) + if not UtilClient.is_unset(tmp_req.labels): + request.labels_shrink = 
OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.labels, 'Labels', 'json') + query = {} + if not UtilClient.is_unset(request.component_id): + query['ComponentId'] = request.component_id + if not UtilClient.is_unset(request.component_ids): + query['ComponentIds'] = request.component_ids + if not UtilClient.is_unset(request.labels_shrink): + query['Labels'] = request.labels_shrink + if not UtilClient.is_unset(request.name): + query['Name'] = request.name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.provider): + query['Provider'] = request.provider + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListComponents', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/components', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListComponentsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_components( + self, + request: pai_studio_20220112_models.ListComponentsRequest, + ) -> pai_studio_20220112_models.ListComponentsResponse: + """ + @summary 获取组件列表 + + @param request: ListComponentsRequest + @return: ListComponentsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_components_with_options(request, headers, runtime) + + async def list_components_async( + self, + request: pai_studio_20220112_models.ListComponentsRequest, + ) -> 
pai_studio_20220112_models.ListComponentsResponse: + """ + @summary 获取组件列表 + + @param request: ListComponentsRequest + @return: ListComponentsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_components_with_options_async(request, headers, runtime) + + def list_instance_jobs_with_options( + self, + request: pai_studio_20220112_models.ListInstanceJobsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListInstanceJobsResponse: + """ + @summary 获取实例任务列表 + + @param request: ListInstanceJobsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListInstanceJobsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.instance_job_type): + query['InstanceJobType'] = request.instance_job_type + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListInstanceJobs', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/instancejobs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListInstanceJobsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_instance_jobs_with_options_async( + self, + request: pai_studio_20220112_models.ListInstanceJobsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> 
pai_studio_20220112_models.ListInstanceJobsResponse: + """ + @summary 获取实例任务列表 + + @param request: ListInstanceJobsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListInstanceJobsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.instance_job_type): + query['InstanceJobType'] = request.instance_job_type + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListInstanceJobs', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/instancejobs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListInstanceJobsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_instance_jobs( + self, + request: pai_studio_20220112_models.ListInstanceJobsRequest, + ) -> pai_studio_20220112_models.ListInstanceJobsResponse: + """ + @summary 获取实例任务列表 + + @param request: ListInstanceJobsRequest + @return: ListInstanceJobsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_instance_jobs_with_options(request, headers, runtime) + + async def list_instance_jobs_async( + self, + request: pai_studio_20220112_models.ListInstanceJobsRequest, + ) -> pai_studio_20220112_models.ListInstanceJobsResponse: + """ + @summary 获取实例任务列表 + + @param request: ListInstanceJobsRequest + @return: ListInstanceJobsResponse + 
""" + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_instance_jobs_with_options_async(request, headers, runtime) + + def list_node_gpumetrics_with_options( + self, + quota_id: str, + request: pai_studio_20220112_models.ListNodeGPUMetricsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListNodeGPUMetricsResponse: + """ + @summary 查询某资源配额下所有节点的性能指标列表 + + @param request: ListNodeGPUMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListNodeGPUMetricsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.gputype): + query['GPUType'] = request.gputype + if not UtilClient.is_unset(request.metric_type): + query['MetricType'] = request.metric_type + if not UtilClient.is_unset(request.node_type): + query['NodeType'] = request.node_type + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListNodeGPUMetrics', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/nodegpumetrics', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListNodeGPUMetricsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_node_gpumetrics_with_options_async( + self, + quota_id: str, + request: pai_studio_20220112_models.ListNodeGPUMetricsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListNodeGPUMetricsResponse: + """ + @summary 查询某资源配额下所有节点的性能指标列表 + + @param request: 
ListNodeGPUMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListNodeGPUMetricsResponse + """ + UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.component_id): - query['ComponentId'] = request.component_id - if not UtilClient.is_unset(request.component_ids): - query['ComponentIds'] = request.component_ids - if not UtilClient.is_unset(request.labels_shrink): - query['Labels'] = request.labels_shrink - if not UtilClient.is_unset(request.name): - query['Name'] = request.name - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.provider): - query['Provider'] = request.provider - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.workspace_id): - query['WorkspaceId'] = request.workspace_id + if not UtilClient.is_unset(request.end_time): + query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.gputype): + query['GPUType'] = request.gputype + if not UtilClient.is_unset(request.metric_type): + query['MetricType'] = request.metric_type + if not UtilClient.is_unset(request.node_type): + query['NodeType'] = request.node_type + if not UtilClient.is_unset(request.start_time): + query['StartTime'] = request.start_time req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListComponents', + action='ListNodeGPUMetrics', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/components', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/nodegpumetrics', method='GET', auth_type='AK', style='ROA', @@ -5051,55 +8404,68 @@ async def 
list_components_with_options_async( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.ListComponentsResponse(), + pai_studio_20220112_models.ListNodeGPUMetricsResponse(), await self.call_api_async(params, req, runtime) ) - def list_components( + def list_node_gpumetrics( self, - request: pai_studio_20220112_models.ListComponentsRequest, - ) -> pai_studio_20220112_models.ListComponentsResponse: + quota_id: str, + request: pai_studio_20220112_models.ListNodeGPUMetricsRequest, + ) -> pai_studio_20220112_models.ListNodeGPUMetricsResponse: + """ + @summary 查询某资源配额下所有节点的性能指标列表 + + @param request: ListNodeGPUMetricsRequest + @return: ListNodeGPUMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_components_with_options(request, headers, runtime) + return self.list_node_gpumetrics_with_options(quota_id, request, headers, runtime) - async def list_components_async( + async def list_node_gpumetrics_async( self, - request: pai_studio_20220112_models.ListComponentsRequest, - ) -> pai_studio_20220112_models.ListComponentsResponse: + quota_id: str, + request: pai_studio_20220112_models.ListNodeGPUMetricsRequest, + ) -> pai_studio_20220112_models.ListNodeGPUMetricsResponse: + """ + @summary 查询某资源配额下所有节点的性能指标列表 + + @param request: ListNodeGPUMetricsRequest + @return: ListNodeGPUMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_components_with_options_async(request, headers, runtime) + return await self.list_node_gpumetrics_with_options_async(quota_id, request, headers, runtime) - def list_instance_jobs_with_options( + def list_node_pods_with_options( self, - request: pai_studio_20220112_models.ListInstanceJobsRequest, + node_id: str, + request: pai_studio_20220112_models.ListNodePodsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.ListInstanceJobsResponse: + ) -> pai_studio_20220112_models.ListNodePodsResponse: + 
""" + @summary 您可以通过ListNodePods得到节点上的Pod信息 + + @param request: ListNodePodsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListNodePodsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.instance_job_type): - query['InstanceJobType'] = request.instance_job_type - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.status): - query['Status'] = request.status + if not UtilClient.is_unset(request.resource_group_id): + query['ResourceGroupId'] = request.resource_group_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListInstanceJobs', + action='ListNodePods', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/instancejobs', + pathname=f'/api/v1/nodes/{OpenApiUtilClient.get_encode_param(node_id)}/Pods', method='GET', auth_type='AK', style='ROA', @@ -5107,39 +8473,38 @@ def list_instance_jobs_with_options( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.ListInstanceJobsResponse(), + pai_studio_20220112_models.ListNodePodsResponse(), self.call_api(params, req, runtime) ) - async def list_instance_jobs_with_options_async( + async def list_node_pods_with_options_async( self, - request: pai_studio_20220112_models.ListInstanceJobsRequest, + node_id: str, + request: pai_studio_20220112_models.ListNodePodsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.ListInstanceJobsResponse: + ) -> pai_studio_20220112_models.ListNodePodsResponse: + """ + @summary 
您可以通过ListNodePods得到节点上的Pod信息 + + @param request: ListNodePodsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListNodePodsResponse + """ UtilClient.validate_model(request) query = {} - if not UtilClient.is_unset(request.instance_job_type): - query['InstanceJobType'] = request.instance_job_type - if not UtilClient.is_unset(request.order): - query['Order'] = request.order - if not UtilClient.is_unset(request.page_number): - query['PageNumber'] = request.page_number - if not UtilClient.is_unset(request.page_size): - query['PageSize'] = request.page_size - if not UtilClient.is_unset(request.sort_by): - query['SortBy'] = request.sort_by - if not UtilClient.is_unset(request.status): - query['Status'] = request.status + if not UtilClient.is_unset(request.resource_group_id): + query['ResourceGroupId'] = request.resource_group_id req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListInstanceJobs', + action='ListNodePods', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/instancejobs', + pathname=f'/api/v1/nodes/{OpenApiUtilClient.get_encode_param(node_id)}/Pods', method='GET', auth_type='AK', style='ROA', @@ -5147,25 +8512,39 @@ async def list_instance_jobs_with_options_async( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.ListInstanceJobsResponse(), + pai_studio_20220112_models.ListNodePodsResponse(), await self.call_api_async(params, req, runtime) ) - def list_instance_jobs( + def list_node_pods( self, - request: pai_studio_20220112_models.ListInstanceJobsRequest, - ) -> pai_studio_20220112_models.ListInstanceJobsResponse: + node_id: str, + request: pai_studio_20220112_models.ListNodePodsRequest, + ) -> pai_studio_20220112_models.ListNodePodsResponse: + """ + @summary 您可以通过ListNodePods得到节点上的Pod信息 + + @param request: ListNodePodsRequest + @return: ListNodePodsResponse + """ runtime = 
util_models.RuntimeOptions() headers = {} - return self.list_instance_jobs_with_options(request, headers, runtime) + return self.list_node_pods_with_options(node_id, request, headers, runtime) - async def list_instance_jobs_async( + async def list_node_pods_async( self, - request: pai_studio_20220112_models.ListInstanceJobsRequest, - ) -> pai_studio_20220112_models.ListInstanceJobsResponse: + node_id: str, + request: pai_studio_20220112_models.ListNodePodsRequest, + ) -> pai_studio_20220112_models.ListNodePodsResponse: + """ + @summary 您可以通过ListNodePods得到节点上的Pod信息 + + @param request: ListNodePodsRequest + @return: ListNodePodsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_instance_jobs_with_options_async(request, headers, runtime) + return await self.list_node_pods_with_options_async(node_id, request, headers, runtime) def list_node_types_with_options( self, @@ -5173,6 +8552,14 @@ def list_node_types_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListNodeTypesResponse: + """ + @summary 获取节点规格列表 + + @param request: ListNodeTypesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListNodeTypesResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.accelerator_type): @@ -5211,6 +8598,14 @@ async def list_node_types_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListNodeTypesResponse: + """ + @summary 获取节点规格列表 + + @param request: ListNodeTypesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListNodeTypesResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.accelerator_type): @@ -5247,6 +8642,12 @@ def list_node_types( self, request: pai_studio_20220112_models.ListNodeTypesRequest, ) -> 
pai_studio_20220112_models.ListNodeTypesResponse: + """ + @summary 获取节点规格列表 + + @param request: ListNodeTypesRequest + @return: ListNodeTypesResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_node_types_with_options(request, headers, runtime) @@ -5255,6 +8656,12 @@ async def list_node_types_async( self, request: pai_studio_20220112_models.ListNodeTypesRequest, ) -> pai_studio_20220112_models.ListNodeTypesResponse: + """ + @summary 获取节点规格列表 + + @param request: ListNodeTypesRequest + @return: ListNodeTypesResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_node_types_with_options_async(request, headers, runtime) @@ -5265,12 +8672,26 @@ def list_nodes_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListNodesResponse: + """ + @summary 获取资源节点列表 + + @param request: ListNodesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListNodesResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.accelerator_type): query['AcceleratorType'] = request.accelerator_type + if not UtilClient.is_unset(request.filter_by_quota_id): + query['FilterByQuotaId'] = request.filter_by_quota_id + if not UtilClient.is_unset(request.filter_by_resource_group_ids): + query['FilterByResourceGroupIds'] = request.filter_by_resource_group_ids if not UtilClient.is_unset(request.gputype): query['GPUType'] = request.gputype + if not UtilClient.is_unset(request.node_names): + query['NodeNames'] = request.node_names if not UtilClient.is_unset(request.node_statuses): query['NodeStatuses'] = request.node_statuses if not UtilClient.is_unset(request.node_types): @@ -5289,6 +8710,8 @@ def list_nodes_with_options( query['ResourceGroupIds'] = request.resource_group_ids if not UtilClient.is_unset(request.sort_by): query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.verbose): 
+ query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) @@ -5315,12 +8738,26 @@ async def list_nodes_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListNodesResponse: + """ + @summary 获取资源节点列表 + + @param request: ListNodesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListNodesResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.accelerator_type): query['AcceleratorType'] = request.accelerator_type + if not UtilClient.is_unset(request.filter_by_quota_id): + query['FilterByQuotaId'] = request.filter_by_quota_id + if not UtilClient.is_unset(request.filter_by_resource_group_ids): + query['FilterByResourceGroupIds'] = request.filter_by_resource_group_ids if not UtilClient.is_unset(request.gputype): query['GPUType'] = request.gputype + if not UtilClient.is_unset(request.node_names): + query['NodeNames'] = request.node_names if not UtilClient.is_unset(request.node_statuses): query['NodeStatuses'] = request.node_statuses if not UtilClient.is_unset(request.node_types): @@ -5339,6 +8776,8 @@ async def list_nodes_with_options_async( query['ResourceGroupIds'] = request.resource_group_ids if not UtilClient.is_unset(request.sort_by): query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose req = open_api_models.OpenApiRequest( headers=headers, query=OpenApiUtilClient.query(query) @@ -5363,6 +8802,12 @@ def list_nodes( self, request: pai_studio_20220112_models.ListNodesRequest, ) -> pai_studio_20220112_models.ListNodesResponse: + """ + @summary 获取资源节点列表 + + @param request: ListNodesRequest + @return: ListNodesResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_nodes_with_options(request, headers, runtime) @@ -5371,6 +8816,12 @@ async def 
list_nodes_async( self, request: pai_studio_20220112_models.ListNodesRequest, ) -> pai_studio_20220112_models.ListNodesResponse: + """ + @summary 获取资源节点列表 + + @param request: ListNodesRequest + @return: ListNodesResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_nodes_with_options_async(request, headers, runtime) @@ -5381,6 +8832,14 @@ def list_operations_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListOperationsResponse: + """ + @summary 获取资源变更列表 + + @param request: ListOperationsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListOperationsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.object_id): @@ -5427,6 +8886,14 @@ async def list_operations_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListOperationsResponse: + """ + @summary 获取资源变更列表 + + @param request: ListOperationsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListOperationsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.object_id): @@ -5471,6 +8938,12 @@ def list_operations( self, request: pai_studio_20220112_models.ListOperationsRequest, ) -> pai_studio_20220112_models.ListOperationsResponse: + """ + @summary 获取资源变更列表 + + @param request: ListOperationsRequest + @return: ListOperationsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_operations_with_options(request, headers, runtime) @@ -5479,6 +8952,12 @@ async def list_operations_async( self, request: pai_studio_20220112_models.ListOperationsRequest, ) -> pai_studio_20220112_models.ListOperationsResponse: + """ + @summary 获取资源变更列表 + + @param request: ListOperationsRequest + @return: ListOperationsResponse + """ runtime = 
util_models.RuntimeOptions() headers = {} return await self.list_operations_with_options_async(request, headers, runtime) @@ -5488,6 +8967,13 @@ def list_permissions_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListPermissionsResponse: + """ + @summary ListPermissions + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListPermissionsResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -5512,14 +8998,307 @@ async def list_permissions_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListPermissionsResponse: + """ + @summary ListPermissions + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListPermissionsResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='ListPermissions', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/permissions', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListPermissionsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_permissions(self) -> pai_studio_20220112_models.ListPermissionsResponse: + """ + @summary ListPermissions + + @return: ListPermissionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_permissions_with_options(headers, runtime) + + async def list_permissions_async(self) -> pai_studio_20220112_models.ListPermissionsResponse: + """ + @summary ListPermissions + + @return: ListPermissionsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_permissions_with_options_async(headers, runtime) + + def list_quota_users_with_options( + self, + quota_id: str, + request: 
pai_studio_20220112_models.ListQuotaUsersRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListQuotaUsersResponse: + """ + @summary 获取当前资源配额用户列表和其所使用的资源 + + @param request: ListQuotaUsersRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListQuotaUsersResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.self_only): + query['SelfOnly'] = request.self_only + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.user_id): + query['UserId'] = request.user_id + if not UtilClient.is_unset(request.username): + query['Username'] = request.username + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListQuotaUsers', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/users', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListQuotaUsersResponse(), + self.call_api(params, req, runtime) + ) + + async def list_quota_users_with_options_async( + self, + quota_id: str, + request: pai_studio_20220112_models.ListQuotaUsersRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListQuotaUsersResponse: + """ + @summary 获取当前资源配额用户列表和其所使用的资源 + + @param request: ListQuotaUsersRequest + @param 
headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListQuotaUsersResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.self_only): + query['SelfOnly'] = request.self_only + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.user_id): + query['UserId'] = request.user_id + if not UtilClient.is_unset(request.username): + query['Username'] = request.username + if not UtilClient.is_unset(request.workspace_id): + query['WorkspaceId'] = request.workspace_id + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListQuotaUsers', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/users', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListQuotaUsersResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_quota_users( + self, + quota_id: str, + request: pai_studio_20220112_models.ListQuotaUsersRequest, + ) -> pai_studio_20220112_models.ListQuotaUsersResponse: + """ + @summary 获取当前资源配额用户列表和其所使用的资源 + + @param request: ListQuotaUsersRequest + @return: ListQuotaUsersResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_quota_users_with_options(quota_id, request, headers, runtime) + + async def list_quota_users_async( + self, + quota_id: str, + request: pai_studio_20220112_models.ListQuotaUsersRequest, + ) -> 
pai_studio_20220112_models.ListQuotaUsersResponse: + """ + @summary 获取当前资源配额用户列表和其所使用的资源 + + @param request: ListQuotaUsersRequest + @return: ListQuotaUsersResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_quota_users_with_options_async(quota_id, request, headers, runtime) + + def list_quota_workloads_with_options( + self, + quota_id: str, + request: pai_studio_20220112_models.ListQuotaWorkloadsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListQuotaWorkloadsResponse: + """ + @summary 您可以通过此API获取Quota上的任务信息 + + @param request: ListQuotaWorkloadsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListQuotaWorkloadsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.before_workload_id): + query['BeforeWorkloadId'] = request.before_workload_id + if not UtilClient.is_unset(request.node_name): + query['NodeName'] = request.node_name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.show_own): + query['ShowOwn'] = request.show_own + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not UtilClient.is_unset(request.sub_quota_ids): + query['SubQuotaIds'] = request.sub_quota_ids + if not UtilClient.is_unset(request.user_ids): + query['UserIds'] = request.user_ids + if not UtilClient.is_unset(request.workload_created_time_range): + query['WorkloadCreatedTimeRange'] = request.workload_created_time_range + if not UtilClient.is_unset(request.workload_ids): + query['WorkloadIds'] = request.workload_ids + if not 
UtilClient.is_unset(request.workload_type): + query['WorkloadType'] = request.workload_type + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListQuotaWorkloads', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/workloads', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListQuotaWorkloadsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_quota_workloads_with_options_async( + self, + quota_id: str, + request: pai_studio_20220112_models.ListQuotaWorkloadsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListQuotaWorkloadsResponse: + """ + @summary 您可以通过此API获取Quota上的任务信息 + + @param request: ListQuotaWorkloadsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListQuotaWorkloadsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.before_workload_id): + query['BeforeWorkloadId'] = request.before_workload_id + if not UtilClient.is_unset(request.node_name): + query['NodeName'] = request.node_name + if not UtilClient.is_unset(request.order): + query['Order'] = request.order + if not UtilClient.is_unset(request.page_number): + query['PageNumber'] = request.page_number + if not UtilClient.is_unset(request.page_size): + query['PageSize'] = request.page_size + if not UtilClient.is_unset(request.show_own): + query['ShowOwn'] = request.show_own + if not UtilClient.is_unset(request.sort_by): + query['SortBy'] = request.sort_by + if not UtilClient.is_unset(request.status): + query['Status'] = request.status + if not 
UtilClient.is_unset(request.sub_quota_ids): + query['SubQuotaIds'] = request.sub_quota_ids + if not UtilClient.is_unset(request.user_ids): + query['UserIds'] = request.user_ids + if not UtilClient.is_unset(request.workload_created_time_range): + query['WorkloadCreatedTimeRange'] = request.workload_created_time_range + if not UtilClient.is_unset(request.workload_ids): + query['WorkloadIds'] = request.workload_ids + if not UtilClient.is_unset(request.workload_type): + query['WorkloadType'] = request.workload_type + if not UtilClient.is_unset(request.workspace_ids): + query['WorkspaceIds'] = request.workspace_ids req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListPermissions', + action='ListQuotaWorkloads', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/permissions', + pathname=f'/api/v1/quotas/{OpenApiUtilClient.get_encode_param(quota_id)}/workloads', method='GET', auth_type='AK', style='ROA', @@ -5527,19 +9306,39 @@ async def list_permissions_with_options_async( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.ListPermissionsResponse(), + pai_studio_20220112_models.ListQuotaWorkloadsResponse(), await self.call_api_async(params, req, runtime) ) - def list_permissions(self) -> pai_studio_20220112_models.ListPermissionsResponse: + def list_quota_workloads( + self, + quota_id: str, + request: pai_studio_20220112_models.ListQuotaWorkloadsRequest, + ) -> pai_studio_20220112_models.ListQuotaWorkloadsResponse: + """ + @summary 您可以通过此API获取Quota上的任务信息 + + @param request: ListQuotaWorkloadsRequest + @return: ListQuotaWorkloadsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_permissions_with_options(headers, runtime) + return self.list_quota_workloads_with_options(quota_id, request, headers, runtime) - async def list_permissions_async(self) -> pai_studio_20220112_models.ListPermissionsResponse: 
+ async def list_quota_workloads_async( + self, + quota_id: str, + request: pai_studio_20220112_models.ListQuotaWorkloadsRequest, + ) -> pai_studio_20220112_models.ListQuotaWorkloadsResponse: + """ + @summary 您可以通过此API获取Quota上的任务信息 + + @param request: ListQuotaWorkloadsRequest + @return: ListQuotaWorkloadsResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_permissions_with_options_async(headers, runtime) + return await self.list_quota_workloads_with_options_async(quota_id, request, headers, runtime) def list_quotas_with_options( self, @@ -5547,8 +9346,20 @@ def list_quotas_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListQuotasResponse: + """ + @summary 获取Quota列表 + + @param request: ListQuotasRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListQuotasResponse + """ UtilClient.validate_model(request) query = {} + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.layout_mode): + query['LayoutMode'] = request.layout_mode if not UtilClient.is_unset(request.order): query['Order'] = request.order if not UtilClient.is_unset(request.page_number): @@ -5567,6 +9378,8 @@ def list_quotas_with_options( query['SortBy'] = request.sort_by if not UtilClient.is_unset(request.statuses): query['Statuses'] = request.statuses + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose if not UtilClient.is_unset(request.workspace_ids): query['WorkspaceIds'] = request.workspace_ids req = open_api_models.OpenApiRequest( @@ -5595,8 +9408,20 @@ async def list_quotas_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListQuotasResponse: + """ + @summary 获取Quota列表 + + @param request: ListQuotasRequest + @param headers: map + @param runtime: runtime options for this request 
RuntimeOptions + @return: ListQuotasResponse + """ UtilClient.validate_model(request) query = {} + if not UtilClient.is_unset(request.labels): + query['Labels'] = request.labels + if not UtilClient.is_unset(request.layout_mode): + query['LayoutMode'] = request.layout_mode if not UtilClient.is_unset(request.order): query['Order'] = request.order if not UtilClient.is_unset(request.page_number): @@ -5615,6 +9440,8 @@ async def list_quotas_with_options_async( query['SortBy'] = request.sort_by if not UtilClient.is_unset(request.statuses): query['Statuses'] = request.statuses + if not UtilClient.is_unset(request.verbose): + query['Verbose'] = request.verbose if not UtilClient.is_unset(request.workspace_ids): query['WorkspaceIds'] = request.workspace_ids req = open_api_models.OpenApiRequest( @@ -5641,6 +9468,12 @@ def list_quotas( self, request: pai_studio_20220112_models.ListQuotasRequest, ) -> pai_studio_20220112_models.ListQuotasResponse: + """ + @summary 获取Quota列表 + + @param request: ListQuotasRequest + @return: ListQuotasResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_quotas_with_options(request, headers, runtime) @@ -5649,6 +9482,12 @@ async def list_quotas_async( self, request: pai_studio_20220112_models.ListQuotasRequest, ) -> pai_studio_20220112_models.ListQuotasResponse: + """ + @summary 获取Quota列表 + + @param request: ListQuotasRequest + @return: ListQuotasResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_quotas_with_options_async(request, headers, runtime) @@ -5660,6 +9499,14 @@ def list_resource_group_machine_groups_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListResourceGroupMachineGroupsResponse: + """ + @summary list machine groups + + @param request: ListResourceGroupMachineGroupsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: 
ListResourceGroupMachineGroupsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.creator_id): @@ -5670,6 +9517,8 @@ def list_resource_group_machine_groups_with_options( query['Name'] = request.name if not UtilClient.is_unset(request.order): query['Order'] = request.order + if not UtilClient.is_unset(request.order_instance_id): + query['OrderInstanceId'] = request.order_instance_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): @@ -5711,6 +9560,14 @@ async def list_resource_group_machine_groups_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListResourceGroupMachineGroupsResponse: + """ + @summary list machine groups + + @param request: ListResourceGroupMachineGroupsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListResourceGroupMachineGroupsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.creator_id): @@ -5721,6 +9578,8 @@ async def list_resource_group_machine_groups_with_options_async( query['Name'] = request.name if not UtilClient.is_unset(request.order): query['Order'] = request.order + if not UtilClient.is_unset(request.order_instance_id): + query['OrderInstanceId'] = request.order_instance_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): @@ -5760,6 +9619,12 @@ def list_resource_group_machine_groups( resource_group_id: str, request: pai_studio_20220112_models.ListResourceGroupMachineGroupsRequest, ) -> pai_studio_20220112_models.ListResourceGroupMachineGroupsResponse: + """ + @summary list machine groups + + @param request: ListResourceGroupMachineGroupsRequest + @return: ListResourceGroupMachineGroupsResponse + """ runtime = util_models.RuntimeOptions() headers = {} 
return self.list_resource_group_machine_groups_with_options(resource_group_id, request, headers, runtime) @@ -5769,6 +9634,12 @@ async def list_resource_group_machine_groups_async( resource_group_id: str, request: pai_studio_20220112_models.ListResourceGroupMachineGroupsRequest, ) -> pai_studio_20220112_models.ListResourceGroupMachineGroupsResponse: + """ + @summary list machine groups + + @param request: ListResourceGroupMachineGroupsRequest + @return: ListResourceGroupMachineGroupsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_resource_group_machine_groups_with_options_async(resource_group_id, request, headers, runtime) @@ -5779,6 +9650,14 @@ def list_resource_groups_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListResourceGroupsResponse: + """ + @summary list resource group + + @param request: ListResourceGroupsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListResourceGroupsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.computing_resource_provider): @@ -5825,6 +9704,14 @@ async def list_resource_groups_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListResourceGroupsResponse: + """ + @summary list resource group + + @param request: ListResourceGroupsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListResourceGroupsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.computing_resource_provider): @@ -5869,6 +9756,12 @@ def list_resource_groups( self, request: pai_studio_20220112_models.ListResourceGroupsRequest, ) -> pai_studio_20220112_models.ListResourceGroupsResponse: + """ + @summary list resource group + + @param request: ListResourceGroupsRequest + @return: 
ListResourceGroupsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_resource_groups_with_options(request, headers, runtime) @@ -5877,10 +9770,252 @@ async def list_resource_groups_async( self, request: pai_studio_20220112_models.ListResourceGroupsRequest, ) -> pai_studio_20220112_models.ListResourceGroupsResponse: + """ + @summary list resource group + + @param request: ListResourceGroupsRequest + @return: ListResourceGroupsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_resource_groups_with_options_async(request, headers, runtime) + def list_spots_stock_preview_with_options( + self, + request: pai_studio_20220112_models.ListSpotsStockPreviewRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListSpotsStockPreviewResponse: + """ + @summary 获取多个抢占式实例的库存概览 + + @param request: ListSpotsStockPreviewRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListSpotsStockPreviewResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.instance_types): + query['InstanceTypes'] = request.instance_types + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListSpotsStockPreview', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/spots/stockpreview', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListSpotsStockPreviewResponse(), + self.call_api(params, req, runtime) + ) + + async def list_spots_stock_preview_with_options_async( + self, + request: pai_studio_20220112_models.ListSpotsStockPreviewRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListSpotsStockPreviewResponse: + """ + 
@summary 获取多个抢占式实例的库存概览 + + @param request: ListSpotsStockPreviewRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListSpotsStockPreviewResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.instance_types): + query['InstanceTypes'] = request.instance_types + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListSpotsStockPreview', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/spots/stockpreview', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListSpotsStockPreviewResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_spots_stock_preview( + self, + request: pai_studio_20220112_models.ListSpotsStockPreviewRequest, + ) -> pai_studio_20220112_models.ListSpotsStockPreviewResponse: + """ + @summary 获取多个抢占式实例的库存概览 + + @param request: ListSpotsStockPreviewRequest + @return: ListSpotsStockPreviewResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_spots_stock_preview_with_options(request, headers, runtime) + + async def list_spots_stock_preview_async( + self, + request: pai_studio_20220112_models.ListSpotsStockPreviewRequest, + ) -> pai_studio_20220112_models.ListSpotsStockPreviewResponse: + """ + @summary 获取多个抢占式实例的库存概览 + + @param request: ListSpotsStockPreviewRequest + @return: ListSpotsStockPreviewResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_spots_stock_preview_with_options_async(request, headers, runtime) + + def list_tag_resources_with_options( + self, + tmp_req: pai_studio_20220112_models.ListTagResourcesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListTagResourcesResponse: + 
""" + @summary 查标签接口 + + @param tmp_req: ListTagResourcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTagResourcesResponse + """ + UtilClient.validate_model(tmp_req) + request = pai_studio_20220112_models.ListTagResourcesShrinkRequest() + OpenApiUtilClient.convert(tmp_req, request) + if not UtilClient.is_unset(tmp_req.resource_id): + request.resource_id_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.resource_id, 'ResourceId', 'json') + if not UtilClient.is_unset(tmp_req.tag): + request.tag_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.tag, 'Tag', 'json') + query = {} + if not UtilClient.is_unset(request.next_token): + query['NextToken'] = request.next_token + if not UtilClient.is_unset(request.region_id): + query['RegionId'] = request.region_id + if not UtilClient.is_unset(request.resource_id_shrink): + query['ResourceId'] = request.resource_id_shrink + if not UtilClient.is_unset(request.resource_type): + query['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.tag_shrink): + query['Tag'] = request.tag_shrink + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListTagResources', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/tags', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListTagResourcesResponse(), + self.call_api(params, req, runtime) + ) + + async def list_tag_resources_with_options_async( + self, + tmp_req: pai_studio_20220112_models.ListTagResourcesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListTagResourcesResponse: + """ + @summary 查标签接口 + + @param tmp_req: ListTagResourcesRequest + @param headers: map + @param runtime: runtime 
options for this request RuntimeOptions + @return: ListTagResourcesResponse + """ + UtilClient.validate_model(tmp_req) + request = pai_studio_20220112_models.ListTagResourcesShrinkRequest() + OpenApiUtilClient.convert(tmp_req, request) + if not UtilClient.is_unset(tmp_req.resource_id): + request.resource_id_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.resource_id, 'ResourceId', 'json') + if not UtilClient.is_unset(tmp_req.tag): + request.tag_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.tag, 'Tag', 'json') + query = {} + if not UtilClient.is_unset(request.next_token): + query['NextToken'] = request.next_token + if not UtilClient.is_unset(request.region_id): + query['RegionId'] = request.region_id + if not UtilClient.is_unset(request.resource_id_shrink): + query['ResourceId'] = request.resource_id_shrink + if not UtilClient.is_unset(request.resource_type): + query['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.tag_shrink): + query['Tag'] = request.tag_shrink + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListTagResources', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/tags', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListTagResourcesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_tag_resources( + self, + request: pai_studio_20220112_models.ListTagResourcesRequest, + ) -> pai_studio_20220112_models.ListTagResourcesResponse: + """ + @summary 查标签接口 + + @param request: ListTagResourcesRequest + @return: ListTagResourcesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_tag_resources_with_options(request, headers, runtime) + + async def list_tag_resources_async( + self, + request: 
pai_studio_20220112_models.ListTagResourcesRequest, + ) -> pai_studio_20220112_models.ListTagResourcesResponse: + """ + @summary 查标签接口 + + @param request: ListTagResourcesRequest + @return: ListTagResourcesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_tag_resources_with_options_async(request, headers, runtime) + def list_training_job_events_with_options( self, training_job_id: str, @@ -5888,6 +10023,14 @@ def list_training_job_events_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobEventsResponse: + """ + @summary 获取指定TrainingJob的事件。 + + @param request: ListTrainingJobEventsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobEventsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -5927,6 +10070,14 @@ async def list_training_job_events_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobEventsResponse: + """ + @summary 获取指定TrainingJob的事件。 + + @param request: ListTrainingJobEventsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobEventsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -5964,6 +10115,12 @@ def list_training_job_events( training_job_id: str, request: pai_studio_20220112_models.ListTrainingJobEventsRequest, ) -> pai_studio_20220112_models.ListTrainingJobEventsResponse: + """ + @summary 获取指定TrainingJob的事件。 + + @param request: ListTrainingJobEventsRequest + @return: ListTrainingJobEventsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_training_job_events_with_options(training_job_id, request, headers, runtime) @@ -5973,6 +10130,12 @@ async def 
list_training_job_events_async( training_job_id: str, request: pai_studio_20220112_models.ListTrainingJobEventsRequest, ) -> pai_studio_20220112_models.ListTrainingJobEventsResponse: + """ + @summary 获取指定TrainingJob的事件。 + + @param request: ListTrainingJobEventsRequest + @return: ListTrainingJobEventsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_training_job_events_with_options_async(training_job_id, request, headers, runtime) @@ -5985,6 +10148,14 @@ def list_training_job_instance_events_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobInstanceEventsResponse: + """ + @summary 获取指定Instance(TrainingJob的运行单元)的日志。 + + @param request: ListTrainingJobInstanceEventsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobInstanceEventsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -6025,6 +10196,14 @@ async def list_training_job_instance_events_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobInstanceEventsResponse: + """ + @summary 获取指定Instance(TrainingJob的运行单元)的日志。 + + @param request: ListTrainingJobInstanceEventsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobInstanceEventsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -6063,6 +10242,12 @@ def list_training_job_instance_events( instance_id: str, request: pai_studio_20220112_models.ListTrainingJobInstanceEventsRequest, ) -> pai_studio_20220112_models.ListTrainingJobInstanceEventsResponse: + """ + @summary 获取指定Instance(TrainingJob的运行单元)的日志。 + + @param request: ListTrainingJobInstanceEventsRequest + @return: ListTrainingJobInstanceEventsResponse + """ runtime = 
util_models.RuntimeOptions() headers = {} return self.list_training_job_instance_events_with_options(training_job_id, instance_id, request, headers, runtime) @@ -6073,6 +10258,12 @@ async def list_training_job_instance_events_async( instance_id: str, request: pai_studio_20220112_models.ListTrainingJobInstanceEventsRequest, ) -> pai_studio_20220112_models.ListTrainingJobInstanceEventsResponse: + """ + @summary 获取指定Instance(TrainingJob的运行单元)的日志。 + + @param request: ListTrainingJobInstanceEventsRequest + @return: ListTrainingJobInstanceEventsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_training_job_instance_events_with_options_async(training_job_id, instance_id, request, headers, runtime) @@ -6084,6 +10275,14 @@ def list_training_job_instance_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobInstanceMetricsResponse: + """ + @summary 获取Training Job实例的Metrics + + @param request: ListTrainingJobInstanceMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobInstanceMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -6125,6 +10324,14 @@ async def list_training_job_instance_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobInstanceMetricsResponse: + """ + @summary 获取Training Job实例的Metrics + + @param request: ListTrainingJobInstanceMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobInstanceMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -6164,6 +10371,12 @@ def list_training_job_instance_metrics( training_job_id: str, request: 
pai_studio_20220112_models.ListTrainingJobInstanceMetricsRequest, ) -> pai_studio_20220112_models.ListTrainingJobInstanceMetricsResponse: + """ + @summary 获取Training Job实例的Metrics + + @param request: ListTrainingJobInstanceMetricsRequest + @return: ListTrainingJobInstanceMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_training_job_instance_metrics_with_options(training_job_id, request, headers, runtime) @@ -6173,6 +10386,12 @@ async def list_training_job_instance_metrics_async( training_job_id: str, request: pai_studio_20220112_models.ListTrainingJobInstanceMetricsRequest, ) -> pai_studio_20220112_models.ListTrainingJobInstanceMetricsResponse: + """ + @summary 获取Training Job实例的Metrics + + @param request: ListTrainingJobInstanceMetricsRequest + @return: ListTrainingJobInstanceMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_training_job_instance_metrics_with_options_async(training_job_id, request, headers, runtime) @@ -6184,10 +10403,20 @@ def list_training_job_logs_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobLogsResponse: + """ + @summary 获取Training Job的日志 + + @param request: ListTrainingJobLogsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobLogsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.instance_id): + query['InstanceId'] = request.instance_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): @@ -6225,10 +10454,20 @@ async def list_training_job_logs_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobLogsResponse: + """ + 
@summary 获取Training Job的日志 + + @param request: ListTrainingJobLogsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobLogsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): query['EndTime'] = request.end_time + if not UtilClient.is_unset(request.instance_id): + query['InstanceId'] = request.instance_id if not UtilClient.is_unset(request.page_number): query['PageNumber'] = request.page_number if not UtilClient.is_unset(request.page_size): @@ -6264,6 +10503,12 @@ def list_training_job_logs( training_job_id: str, request: pai_studio_20220112_models.ListTrainingJobLogsRequest, ) -> pai_studio_20220112_models.ListTrainingJobLogsResponse: + """ + @summary 获取Training Job的日志 + + @param request: ListTrainingJobLogsRequest + @return: ListTrainingJobLogsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_training_job_logs_with_options(training_job_id, request, headers, runtime) @@ -6273,6 +10518,12 @@ async def list_training_job_logs_async( training_job_id: str, request: pai_studio_20220112_models.ListTrainingJobLogsRequest, ) -> pai_studio_20220112_models.ListTrainingJobLogsResponse: + """ + @summary 获取Training Job的日志 + + @param request: ListTrainingJobLogsRequest + @return: ListTrainingJobLogsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_training_job_logs_with_options_async(training_job_id, request, headers, runtime) @@ -6284,6 +10535,14 @@ def list_training_job_metrics_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobMetricsResponse: + """ + @summary 获取Training Job的Metrics + + @param request: ListTrainingJobMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobMetricsResponse + """ UtilClient.validate_model(request) query = {} 
if not UtilClient.is_unset(request.end_time): @@ -6327,6 +10586,14 @@ async def list_training_job_metrics_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobMetricsResponse: + """ + @summary 获取Training Job的Metrics + + @param request: ListTrainingJobMetricsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobMetricsResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.end_time): @@ -6368,6 +10635,12 @@ def list_training_job_metrics( training_job_id: str, request: pai_studio_20220112_models.ListTrainingJobMetricsRequest, ) -> pai_studio_20220112_models.ListTrainingJobMetricsResponse: + """ + @summary 获取Training Job的Metrics + + @param request: ListTrainingJobMetricsRequest + @return: ListTrainingJobMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.list_training_job_metrics_with_options(training_job_id, request, headers, runtime) @@ -6377,16 +10650,138 @@ async def list_training_job_metrics_async( training_job_id: str, request: pai_studio_20220112_models.ListTrainingJobMetricsRequest, ) -> pai_studio_20220112_models.ListTrainingJobMetricsResponse: + """ + @summary 获取Training Job的Metrics + + @param request: ListTrainingJobMetricsRequest + @return: ListTrainingJobMetricsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.list_training_job_metrics_with_options_async(training_job_id, request, headers, runtime) + def list_training_job_output_models_with_options( + self, + training_job_id: str, + request: pai_studio_20220112_models.ListTrainingJobOutputModelsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListTrainingJobOutputModelsResponse: + """ + @summary 获取Training Job 产出的所有模型信息 + + @param request: ListTrainingJobOutputModelsRequest + @param headers: map + 
@param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobOutputModelsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.token): + query['Token'] = request.token + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListTrainingJobOutputModels', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/outputmodels', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListTrainingJobOutputModelsResponse(), + self.call_api(params, req, runtime) + ) + + async def list_training_job_output_models_with_options_async( + self, + training_job_id: str, + request: pai_studio_20220112_models.ListTrainingJobOutputModelsRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ListTrainingJobOutputModelsResponse: + """ + @summary 获取Training Job 产出的所有模型信息 + + @param request: ListTrainingJobOutputModelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobOutputModelsResponse + """ + UtilClient.validate_model(request) + query = {} + if not UtilClient.is_unset(request.token): + query['Token'] = request.token + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='ListTrainingJobOutputModels', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/outputmodels', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListTrainingJobOutputModelsResponse(), + await 
self.call_api_async(params, req, runtime) + ) + + def list_training_job_output_models( + self, + training_job_id: str, + request: pai_studio_20220112_models.ListTrainingJobOutputModelsRequest, + ) -> pai_studio_20220112_models.ListTrainingJobOutputModelsResponse: + """ + @summary 获取Training Job 产出的所有模型信息 + + @param request: ListTrainingJobOutputModelsRequest + @return: ListTrainingJobOutputModelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_training_job_output_models_with_options(training_job_id, request, headers, runtime) + + async def list_training_job_output_models_async( + self, + training_job_id: str, + request: pai_studio_20220112_models.ListTrainingJobOutputModelsRequest, + ) -> pai_studio_20220112_models.ListTrainingJobOutputModelsResponse: + """ + @summary 获取Training Job 产出的所有模型信息 + + @param request: ListTrainingJobOutputModelsRequest + @return: ListTrainingJobOutputModelsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_training_job_output_models_with_options_async(training_job_id, request, headers, runtime) + def list_training_jobs_with_options( self, tmp_req: pai_studio_20220112_models.ListTrainingJobsRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobsResponse: + """ + @summary 获取TrainingJob的列表 + + @param tmp_req: ListTrainingJobsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobsResponse + """ UtilClient.validate_model(tmp_req) request = pai_studio_20220112_models.ListTrainingJobsShrinkRequest() OpenApiUtilClient.convert(tmp_req, request) @@ -6447,6 +10842,14 @@ async def list_training_jobs_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ListTrainingJobsResponse: + """ + @summary 获取TrainingJob的列表 + + @param tmp_req: ListTrainingJobsRequest + @param headers: map 
+ @param runtime: runtime options for this request RuntimeOptions + @return: ListTrainingJobsResponse + """ UtilClient.validate_model(tmp_req) request = pai_studio_20220112_models.ListTrainingJobsShrinkRequest() OpenApiUtilClient.convert(tmp_req, request) @@ -6486,36 +10889,160 @@ async def list_training_jobs_with_options_async( query=OpenApiUtilClient.query(query) ) params = open_api_models.Params( - action='ListTrainingJobs', + action='ListTrainingJobs', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/trainingjobs', + method='GET', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ListTrainingJobsResponse(), + await self.call_api_async(params, req, runtime) + ) + + def list_training_jobs( + self, + request: pai_studio_20220112_models.ListTrainingJobsRequest, + ) -> pai_studio_20220112_models.ListTrainingJobsResponse: + """ + @summary 获取TrainingJob的列表 + + @param request: ListTrainingJobsRequest + @return: ListTrainingJobsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.list_training_jobs_with_options(request, headers, runtime) + + async def list_training_jobs_async( + self, + request: pai_studio_20220112_models.ListTrainingJobsRequest, + ) -> pai_studio_20220112_models.ListTrainingJobsResponse: + """ + @summary 获取TrainingJob的列表 + + @param request: ListTrainingJobsRequest + @return: ListTrainingJobsResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.list_training_jobs_with_options_async(request, headers, runtime) + + def operate_node_with_options( + self, + node_id: str, + request: pai_studio_20220112_models.OperateNodeRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.OperateNodeResponse: + """ + @summary 您可以通过OperateNode对节点进行操作 + + @param request: OperateNodeRequest + @param headers: map + @param runtime: runtime options for this 
request RuntimeOptions + @return: OperateNodeResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.operation): + body['Operation'] = request.operation + if not UtilClient.is_unset(request.resource_group_id): + body['ResourceGroupId'] = request.resource_group_id + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='OperateNode', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/nodes/{OpenApiUtilClient.get_encode_param(node_id)}', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.OperateNodeResponse(), + self.call_api(params, req, runtime) + ) + + async def operate_node_with_options_async( + self, + node_id: str, + request: pai_studio_20220112_models.OperateNodeRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.OperateNodeResponse: + """ + @summary 您可以通过OperateNode对节点进行操作 + + @param request: OperateNodeRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: OperateNodeResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.operation): + body['Operation'] = request.operation + if not UtilClient.is_unset(request.resource_group_id): + body['ResourceGroupId'] = request.resource_group_id + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='OperateNode', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/trainingjobs', - method='GET', + pathname=f'/api/v1/nodes/{OpenApiUtilClient.get_encode_param(node_id)}', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - 
pai_studio_20220112_models.ListTrainingJobsResponse(), + pai_studio_20220112_models.OperateNodeResponse(), await self.call_api_async(params, req, runtime) ) - def list_training_jobs( + def operate_node( self, - request: pai_studio_20220112_models.ListTrainingJobsRequest, - ) -> pai_studio_20220112_models.ListTrainingJobsResponse: + node_id: str, + request: pai_studio_20220112_models.OperateNodeRequest, + ) -> pai_studio_20220112_models.OperateNodeResponse: + """ + @summary 您可以通过OperateNode对节点进行操作 + + @param request: OperateNodeRequest + @return: OperateNodeResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.list_training_jobs_with_options(request, headers, runtime) + return self.operate_node_with_options(node_id, request, headers, runtime) - async def list_training_jobs_async( + async def operate_node_async( self, - request: pai_studio_20220112_models.ListTrainingJobsRequest, - ) -> pai_studio_20220112_models.ListTrainingJobsResponse: + node_id: str, + request: pai_studio_20220112_models.OperateNodeRequest, + ) -> pai_studio_20220112_models.OperateNodeResponse: + """ + @summary 您可以通过OperateNode对节点进行操作 + + @param request: OperateNodeRequest + @return: OperateNodeResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.list_training_jobs_with_options_async(request, headers, runtime) + return await self.operate_node_with_options_async(node_id, request, headers, runtime) def release_algorithm_with_options( self, @@ -6524,6 +11051,14 @@ def release_algorithm_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ReleaseAlgorithmResponse: + """ + @summary 发布算法为公共算法 + + @param request: ReleaseAlgorithmRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ReleaseAlgorithmResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.target_algorithm_name): @@ -6557,6 +11092,14 
@@ async def release_algorithm_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ReleaseAlgorithmResponse: + """ + @summary 发布算法为公共算法 + + @param request: ReleaseAlgorithmRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ReleaseAlgorithmResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.target_algorithm_name): @@ -6588,6 +11131,12 @@ def release_algorithm( algorithm_id: str, request: pai_studio_20220112_models.ReleaseAlgorithmRequest, ) -> pai_studio_20220112_models.ReleaseAlgorithmResponse: + """ + @summary 发布算法为公共算法 + + @param request: ReleaseAlgorithmRequest + @return: ReleaseAlgorithmResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.release_algorithm_with_options(algorithm_id, request, headers, runtime) @@ -6597,6 +11146,12 @@ async def release_algorithm_async( algorithm_id: str, request: pai_studio_20220112_models.ReleaseAlgorithmRequest, ) -> pai_studio_20220112_models.ReleaseAlgorithmResponse: + """ + @summary 发布算法为公共算法 + + @param request: ReleaseAlgorithmRequest + @return: ReleaseAlgorithmResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.release_algorithm_with_options_async(algorithm_id, request, headers, runtime) @@ -6609,6 +11164,14 @@ def release_algorithm_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ReleaseAlgorithmVersionResponse: + """ + @summary 发布公共算法版本 + + @param request: ReleaseAlgorithmVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ReleaseAlgorithmVersionResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.target_algorithm_name): @@ -6645,6 +11208,14 @@ async def release_algorithm_version_with_options_async( headers: Dict[str, str], runtime: 
util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ReleaseAlgorithmVersionResponse: + """ + @summary 发布公共算法版本 + + @param request: ReleaseAlgorithmVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ReleaseAlgorithmVersionResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.target_algorithm_name): @@ -6679,6 +11250,12 @@ def release_algorithm_version( algorithm_version: str, request: pai_studio_20220112_models.ReleaseAlgorithmVersionRequest, ) -> pai_studio_20220112_models.ReleaseAlgorithmVersionResponse: + """ + @summary 发布公共算法版本 + + @param request: ReleaseAlgorithmVersionRequest + @return: ReleaseAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.release_algorithm_version_with_options(algorithm_id, algorithm_version, request, headers, runtime) @@ -6689,10 +11266,110 @@ async def release_algorithm_version_async( algorithm_version: str, request: pai_studio_20220112_models.ReleaseAlgorithmVersionRequest, ) -> pai_studio_20220112_models.ReleaseAlgorithmVersionResponse: + """ + @summary 发布公共算法版本 + + @param request: ReleaseAlgorithmVersionRequest + @return: ReleaseAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.release_algorithm_version_with_options_async(algorithm_id, algorithm_version, request, headers, runtime) + def release_machine_group_with_options( + self, + resource_group_id: str, + machine_group_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ReleaseMachineGroupResponse: + """ + @summary 释放到期的机器组 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ReleaseMachineGroupResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='ReleaseMachineGroup', + version='2022-01-12', + 
protocol='HTTPS', + pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/machinegroups/{OpenApiUtilClient.get_encode_param(machine_group_id)}', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ReleaseMachineGroupResponse(), + self.call_api(params, req, runtime) + ) + + async def release_machine_group_with_options_async( + self, + resource_group_id: str, + machine_group_id: str, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.ReleaseMachineGroupResponse: + """ + @summary 释放到期的机器组 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ReleaseMachineGroupResponse + """ + req = open_api_models.OpenApiRequest( + headers=headers + ) + params = open_api_models.Params( + action='ReleaseMachineGroup', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/machinegroups/{OpenApiUtilClient.get_encode_param(machine_group_id)}', + method='POST', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.ReleaseMachineGroupResponse(), + await self.call_api_async(params, req, runtime) + ) + + def release_machine_group( + self, + resource_group_id: str, + machine_group_id: str, + ) -> pai_studio_20220112_models.ReleaseMachineGroupResponse: + """ + @summary 释放到期的机器组 + + @return: ReleaseMachineGroupResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.release_machine_group_with_options(resource_group_id, machine_group_id, headers, runtime) + + async def release_machine_group_async( + self, + resource_group_id: str, + machine_group_id: str, + ) -> pai_studio_20220112_models.ReleaseMachineGroupResponse: + """ + @summary 释放到期的机器组 + + @return: ReleaseMachineGroupResponse + """ 
+ runtime = util_models.RuntimeOptions() + headers = {} + return await self.release_machine_group_with_options_async(resource_group_id, machine_group_id, headers, runtime) + def scale_quota_with_options( self, quota_id: str, @@ -6700,6 +11377,14 @@ def scale_quota_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ScaleQuotaResponse: + """ + @summary 扩缩容Quota + + @param request: ScaleQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ScaleQuotaResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.min): @@ -6733,6 +11418,14 @@ async def scale_quota_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.ScaleQuotaResponse: + """ + @summary 扩缩容Quota + + @param request: ScaleQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: ScaleQuotaResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.min): @@ -6764,6 +11457,12 @@ def scale_quota( quota_id: str, request: pai_studio_20220112_models.ScaleQuotaRequest, ) -> pai_studio_20220112_models.ScaleQuotaResponse: + """ + @summary 扩缩容Quota + + @param request: ScaleQuotaRequest + @return: ScaleQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.scale_quota_with_options(quota_id, request, headers, runtime) @@ -6773,24 +11472,37 @@ async def scale_quota_async( quota_id: str, request: pai_studio_20220112_models.ScaleQuotaRequest, ) -> pai_studio_20220112_models.ScaleQuotaResponse: + """ + @summary 扩缩容Quota + + @param request: ScaleQuotaRequest + @return: ScaleQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.scale_quota_with_options_async(quota_id, request, headers, runtime) - def stop_arrears_training_job_with_options( + def 
stop_training_job_with_options( self, training_job_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.StopArrearsTrainingJobResponse: + ) -> pai_studio_20220112_models.StopTrainingJobResponse: + """ + @summary 停止一个TrainingJob + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: StopTrainingJobResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='StopArrearsTrainingJob', + action='StopTrainingJob', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/arrearage/stop', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/stop', method='PUT', auth_type='AK', style='ROA', @@ -6798,24 +11510,31 @@ def stop_arrears_training_job_with_options( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.StopArrearsTrainingJobResponse(), + pai_studio_20220112_models.StopTrainingJobResponse(), self.call_api(params, req, runtime) ) - async def stop_arrears_training_job_with_options_async( + async def stop_training_job_with_options_async( self, training_job_id: str, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.StopArrearsTrainingJobResponse: + ) -> pai_studio_20220112_models.StopTrainingJobResponse: + """ + @summary 停止一个TrainingJob + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: StopTrainingJobResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) params = open_api_models.Params( - action='StopArrearsTrainingJob', + action='StopTrainingJob', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/arrearage/stop', + pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/stop', 
method='PUT', auth_type='AK', style='ROA', @@ -6823,91 +11542,283 @@ async def stop_arrears_training_job_with_options_async( body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.StopArrearsTrainingJobResponse(), + pai_studio_20220112_models.StopTrainingJobResponse(), await self.call_api_async(params, req, runtime) ) - def stop_arrears_training_job( + def stop_training_job( self, training_job_id: str, - ) -> pai_studio_20220112_models.StopArrearsTrainingJobResponse: + ) -> pai_studio_20220112_models.StopTrainingJobResponse: + """ + @summary 停止一个TrainingJob + + @return: StopTrainingJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.stop_arrears_training_job_with_options(training_job_id, headers, runtime) + return self.stop_training_job_with_options(training_job_id, headers, runtime) - async def stop_arrears_training_job_async( + async def stop_training_job_async( self, training_job_id: str, - ) -> pai_studio_20220112_models.StopArrearsTrainingJobResponse: + ) -> pai_studio_20220112_models.StopTrainingJobResponse: + """ + @summary 停止一个TrainingJob + + @return: StopTrainingJobResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.stop_arrears_training_job_with_options_async(training_job_id, headers, runtime) + return await self.stop_training_job_with_options_async(training_job_id, headers, runtime) - def stop_training_job_with_options( + def tag_resources_with_options( self, - training_job_id: str, + request: pai_studio_20220112_models.TagResourcesRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.StopTrainingJobResponse: + ) -> pai_studio_20220112_models.TagResourcesResponse: + """ + @summary 打标签接口 + + @param request: TagResourcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: TagResourcesResponse + """ + UtilClient.validate_model(request) + body = {} + if not 
UtilClient.is_unset(request.region_id): + body['RegionId'] = request.region_id + if not UtilClient.is_unset(request.resource_id): + body['ResourceId'] = request.resource_id + if not UtilClient.is_unset(request.resource_type): + body['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.tag): + body['Tag'] = request.tag req = open_api_models.OpenApiRequest( - headers=headers + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='StopTrainingJob', + action='TagResources', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/stop', - method='PUT', + pathname=f'/api/v1/tags', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.StopTrainingJobResponse(), + pai_studio_20220112_models.TagResourcesResponse(), self.call_api(params, req, runtime) ) - async def stop_training_job_with_options_async( + async def tag_resources_with_options_async( self, - training_job_id: str, + request: pai_studio_20220112_models.TagResourcesRequest, headers: Dict[str, str], runtime: util_models.RuntimeOptions, - ) -> pai_studio_20220112_models.StopTrainingJobResponse: + ) -> pai_studio_20220112_models.TagResourcesResponse: + """ + @summary 打标签接口 + + @param request: TagResourcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: TagResourcesResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.region_id): + body['RegionId'] = request.region_id + if not UtilClient.is_unset(request.resource_id): + body['ResourceId'] = request.resource_id + if not UtilClient.is_unset(request.resource_type): + body['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.tag): + body['Tag'] = request.tag req = open_api_models.OpenApiRequest( - headers=headers + 
headers=headers, + body=OpenApiUtilClient.parse_to_map(body) ) params = open_api_models.Params( - action='StopTrainingJob', + action='TagResources', version='2022-01-12', protocol='HTTPS', - pathname=f'/api/v1/trainingjobs/{OpenApiUtilClient.get_encode_param(training_job_id)}/stop', - method='PUT', + pathname=f'/api/v1/tags', + method='POST', auth_type='AK', style='ROA', req_body_type='json', body_type='json' ) return TeaCore.from_map( - pai_studio_20220112_models.StopTrainingJobResponse(), + pai_studio_20220112_models.TagResourcesResponse(), await self.call_api_async(params, req, runtime) ) - def stop_training_job( + def tag_resources( self, - training_job_id: str, - ) -> pai_studio_20220112_models.StopTrainingJobResponse: + request: pai_studio_20220112_models.TagResourcesRequest, + ) -> pai_studio_20220112_models.TagResourcesResponse: + """ + @summary 打标签接口 + + @param request: TagResourcesRequest + @return: TagResourcesResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return self.stop_training_job_with_options(training_job_id, headers, runtime) + return self.tag_resources_with_options(request, headers, runtime) - async def stop_training_job_async( + async def tag_resources_async( self, - training_job_id: str, - ) -> pai_studio_20220112_models.StopTrainingJobResponse: + request: pai_studio_20220112_models.TagResourcesRequest, + ) -> pai_studio_20220112_models.TagResourcesResponse: + """ + @summary 打标签接口 + + @param request: TagResourcesRequest + @return: TagResourcesResponse + """ runtime = util_models.RuntimeOptions() headers = {} - return await self.stop_training_job_with_options_async(training_job_id, headers, runtime) + return await self.tag_resources_with_options_async(request, headers, runtime) + + def untag_resources_with_options( + self, + tmp_req: pai_studio_20220112_models.UntagResourcesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.UntagResourcesResponse: + """ + @summary 
删标签接口 + + @param tmp_req: UntagResourcesRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UntagResourcesResponse + """ + UtilClient.validate_model(tmp_req) + request = pai_studio_20220112_models.UntagResourcesShrinkRequest() + OpenApiUtilClient.convert(tmp_req, request) + if not UtilClient.is_unset(tmp_req.resource_id): + request.resource_id_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.resource_id, 'ResourceId', 'json') + if not UtilClient.is_unset(tmp_req.tag_key): + request.tag_key_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.tag_key, 'TagKey', 'json') + query = {} + if not UtilClient.is_unset(request.all): + query['All'] = request.all + if not UtilClient.is_unset(request.region_id): + query['RegionId'] = request.region_id + if not UtilClient.is_unset(request.resource_id_shrink): + query['ResourceId'] = request.resource_id_shrink + if not UtilClient.is_unset(request.resource_type): + query['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.tag_key_shrink): + query['TagKey'] = request.tag_key_shrink + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='UntagResources', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/tags', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.UntagResourcesResponse(), + self.call_api(params, req, runtime) + ) + + async def untag_resources_with_options_async( + self, + tmp_req: pai_studio_20220112_models.UntagResourcesRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.UntagResourcesResponse: + """ + @summary 删标签接口 + + @param tmp_req: UntagResourcesRequest + @param headers: map + @param runtime: runtime options for this request 
RuntimeOptions + @return: UntagResourcesResponse + """ + UtilClient.validate_model(tmp_req) + request = pai_studio_20220112_models.UntagResourcesShrinkRequest() + OpenApiUtilClient.convert(tmp_req, request) + if not UtilClient.is_unset(tmp_req.resource_id): + request.resource_id_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.resource_id, 'ResourceId', 'json') + if not UtilClient.is_unset(tmp_req.tag_key): + request.tag_key_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.tag_key, 'TagKey', 'json') + query = {} + if not UtilClient.is_unset(request.all): + query['All'] = request.all + if not UtilClient.is_unset(request.region_id): + query['RegionId'] = request.region_id + if not UtilClient.is_unset(request.resource_id_shrink): + query['ResourceId'] = request.resource_id_shrink + if not UtilClient.is_unset(request.resource_type): + query['ResourceType'] = request.resource_type + if not UtilClient.is_unset(request.tag_key_shrink): + query['TagKey'] = request.tag_key_shrink + req = open_api_models.OpenApiRequest( + headers=headers, + query=OpenApiUtilClient.query(query) + ) + params = open_api_models.Params( + action='UntagResources', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/tags', + method='DELETE', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.UntagResourcesResponse(), + await self.call_api_async(params, req, runtime) + ) + + def untag_resources( + self, + request: pai_studio_20220112_models.UntagResourcesRequest, + ) -> pai_studio_20220112_models.UntagResourcesResponse: + """ + @summary 删标签接口 + + @param request: UntagResourcesRequest + @return: UntagResourcesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.untag_resources_with_options(request, headers, runtime) + + async def untag_resources_async( + self, + request: pai_studio_20220112_models.UntagResourcesRequest, + ) -> 
pai_studio_20220112_models.UntagResourcesResponse: + """ + @summary 删标签接口 + + @param request: UntagResourcesRequest + @return: UntagResourcesResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.untag_resources_with_options_async(request, headers, runtime) def update_algorithm_with_options( self, @@ -6916,6 +11827,14 @@ def update_algorithm_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateAlgorithmResponse: + """ + @summary 更新算法 + + @param request: UpdateAlgorithmRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateAlgorithmResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.algorithm_description): @@ -6949,6 +11868,14 @@ async def update_algorithm_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateAlgorithmResponse: + """ + @summary 更新算法 + + @param request: UpdateAlgorithmRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateAlgorithmResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.algorithm_description): @@ -6980,6 +11907,12 @@ def update_algorithm( algorithm_id: str, request: pai_studio_20220112_models.UpdateAlgorithmRequest, ) -> pai_studio_20220112_models.UpdateAlgorithmResponse: + """ + @summary 更新算法 + + @param request: UpdateAlgorithmRequest + @return: UpdateAlgorithmResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_algorithm_with_options(algorithm_id, request, headers, runtime) @@ -6989,6 +11922,12 @@ async def update_algorithm_async( algorithm_id: str, request: pai_studio_20220112_models.UpdateAlgorithmRequest, ) -> pai_studio_20220112_models.UpdateAlgorithmResponse: + """ + @summary 更新算法 + + @param request: UpdateAlgorithmRequest + @return: 
UpdateAlgorithmResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_algorithm_with_options_async(algorithm_id, request, headers, runtime) @@ -7001,6 +11940,14 @@ def update_algorithm_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateAlgorithmVersionResponse: + """ + @summary 更新算法 + + @param tmp_req: UpdateAlgorithmVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateAlgorithmVersionResponse + """ UtilClient.validate_model(tmp_req) request = pai_studio_20220112_models.UpdateAlgorithmVersionShrinkRequest() OpenApiUtilClient.convert(tmp_req, request) @@ -7037,6 +11984,14 @@ async def update_algorithm_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateAlgorithmVersionResponse: + """ + @summary 更新算法 + + @param tmp_req: UpdateAlgorithmVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateAlgorithmVersionResponse + """ UtilClient.validate_model(tmp_req) request = pai_studio_20220112_models.UpdateAlgorithmVersionShrinkRequest() OpenApiUtilClient.convert(tmp_req, request) @@ -7071,6 +12026,12 @@ def update_algorithm_version( algorithm_version: str, request: pai_studio_20220112_models.UpdateAlgorithmVersionRequest, ) -> pai_studio_20220112_models.UpdateAlgorithmVersionResponse: + """ + @summary 更新算法 + + @param request: UpdateAlgorithmVersionRequest + @return: UpdateAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_algorithm_version_with_options(algorithm_id, algorithm_version, request, headers, runtime) @@ -7081,6 +12042,12 @@ async def update_algorithm_version_async( algorithm_version: str, request: pai_studio_20220112_models.UpdateAlgorithmVersionRequest, ) -> 
pai_studio_20220112_models.UpdateAlgorithmVersionResponse: + """ + @summary 更新算法 + + @param request: UpdateAlgorithmVersionRequest + @return: UpdateAlgorithmVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_algorithm_version_with_options_async(algorithm_id, algorithm_version, request, headers, runtime) @@ -7092,6 +12059,14 @@ def update_component_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateComponentResponse: + """ + @summary 更新组件 + + @param request: UpdateComponentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateComponentResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): @@ -7127,6 +12102,14 @@ async def update_component_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateComponentResponse: + """ + @summary 更新组件 + + @param request: UpdateComponentRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateComponentResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): @@ -7160,6 +12143,12 @@ def update_component( component_id: str, request: pai_studio_20220112_models.UpdateComponentRequest, ) -> pai_studio_20220112_models.UpdateComponentResponse: + """ + @summary 更新组件 + + @param request: UpdateComponentRequest + @return: UpdateComponentResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_component_with_options(component_id, request, headers, runtime) @@ -7169,6 +12158,12 @@ async def update_component_async( component_id: str, request: pai_studio_20220112_models.UpdateComponentRequest, ) -> pai_studio_20220112_models.UpdateComponentResponse: + """ + @summary 更新组件 + + @param request: UpdateComponentRequest + @return: 
UpdateComponentResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_component_with_options_async(component_id, request, headers, runtime) @@ -7181,6 +12176,14 @@ def update_component_version_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateComponentVersionResponse: + """ + @summary 更新组件版本 + + @param request: UpdateComponentVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateComponentVersionResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.labels): @@ -7215,6 +12218,14 @@ async def update_component_version_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateComponentVersionResponse: + """ + @summary 更新组件版本 + + @param request: UpdateComponentVersionRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateComponentVersionResponse + """ UtilClient.validate_model(request) query = {} if not UtilClient.is_unset(request.labels): @@ -7247,6 +12258,12 @@ def update_component_version( version: str, request: pai_studio_20220112_models.UpdateComponentVersionRequest, ) -> pai_studio_20220112_models.UpdateComponentVersionResponse: + """ + @summary 更新组件版本 + + @param request: UpdateComponentVersionRequest + @return: UpdateComponentVersionResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_component_version_with_options(component_id, version, request, headers, runtime) @@ -7257,6 +12274,12 @@ async def update_component_version_async( version: str, request: pai_studio_20220112_models.UpdateComponentVersionRequest, ) -> pai_studio_20220112_models.UpdateComponentVersionResponse: + """ + @summary 更新组件版本 + + @param request: UpdateComponentVersionRequest + @return: UpdateComponentVersionResponse + """ runtime = 
util_models.RuntimeOptions() headers = {} return await self.update_component_version_with_options_async(component_id, version, request, headers, runtime) @@ -7267,6 +12290,13 @@ def update_component_version_snapshot_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateComponentVersionSnapshotResponse: + """ + @summary 更新组件版本快照 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateComponentVersionSnapshotResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -7292,6 +12322,13 @@ async def update_component_version_snapshot_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateComponentVersionSnapshotResponse: + """ + @summary 更新组件版本快照 + + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateComponentVersionSnapshotResponse + """ req = open_api_models.OpenApiRequest( headers=headers ) @@ -7315,6 +12352,11 @@ def update_component_version_snapshot( self, snapshot_id: str, ) -> pai_studio_20220112_models.UpdateComponentVersionSnapshotResponse: + """ + @summary 更新组件版本快照 + + @return: UpdateComponentVersionSnapshotResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_component_version_snapshot_with_options(snapshot_id, headers, runtime) @@ -7323,6 +12365,11 @@ async def update_component_version_snapshot_async( self, snapshot_id: str, ) -> pai_studio_20220112_models.UpdateComponentVersionSnapshotResponse: + """ + @summary 更新组件版本快照 + + @return: UpdateComponentVersionSnapshotResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_component_version_snapshot_with_options_async(snapshot_id, headers, runtime) @@ -7334,12 +12381,26 @@ def update_quota_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> 
pai_studio_20220112_models.UpdateQuotaResponse: + """ + @summary 更新Quota + + @param request: UpdateQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateQuotaResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): body['Description'] = request.description if not UtilClient.is_unset(request.labels): body['Labels'] = request.labels + if not UtilClient.is_unset(request.queue_strategy): + body['QueueStrategy'] = request.queue_strategy + if not UtilClient.is_unset(request.quota_config): + body['QuotaConfig'] = request.quota_config + if not UtilClient.is_unset(request.quota_name): + body['QuotaName'] = request.quota_name req = open_api_models.OpenApiRequest( headers=headers, body=OpenApiUtilClient.parse_to_map(body) @@ -7367,12 +12428,26 @@ async def update_quota_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateQuotaResponse: + """ + @summary 更新Quota + + @param request: UpdateQuotaRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateQuotaResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.description): body['Description'] = request.description if not UtilClient.is_unset(request.labels): body['Labels'] = request.labels + if not UtilClient.is_unset(request.queue_strategy): + body['QueueStrategy'] = request.queue_strategy + if not UtilClient.is_unset(request.quota_config): + body['QuotaConfig'] = request.quota_config + if not UtilClient.is_unset(request.quota_name): + body['QuotaName'] = request.quota_name req = open_api_models.OpenApiRequest( headers=headers, body=OpenApiUtilClient.parse_to_map(body) @@ -7398,6 +12473,12 @@ def update_quota( quota_id: str, request: pai_studio_20220112_models.UpdateQuotaRequest, ) -> pai_studio_20220112_models.UpdateQuotaResponse: + """ + @summary 更新Quota 
+ + @param request: UpdateQuotaRequest + @return: UpdateQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_quota_with_options(quota_id, request, headers, runtime) @@ -7407,6 +12488,12 @@ async def update_quota_async( quota_id: str, request: pai_studio_20220112_models.UpdateQuotaRequest, ) -> pai_studio_20220112_models.UpdateQuotaResponse: + """ + @summary 更新Quota + + @param request: UpdateQuotaRequest + @return: UpdateQuotaResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_quota_with_options_async(quota_id, request, headers, runtime) @@ -7418,6 +12505,14 @@ def update_quota_labels_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateQuotaLabelsResponse: + """ + @summary 更新Quota标签 + + @param request: UpdateQuotaLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateQuotaLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -7449,6 +12544,14 @@ async def update_quota_labels_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateQuotaLabelsResponse: + """ + @summary 更新Quota标签 + + @param request: UpdateQuotaLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateQuotaLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -7478,6 +12581,12 @@ def update_quota_labels( quota_id: str, request: pai_studio_20220112_models.UpdateQuotaLabelsRequest, ) -> pai_studio_20220112_models.UpdateQuotaLabelsResponse: + """ + @summary 更新Quota标签 + + @param request: UpdateQuotaLabelsRequest + @return: UpdateQuotaLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_quota_labels_with_options(quota_id, request, 
headers, runtime) @@ -7487,6 +12596,12 @@ async def update_quota_labels_async( quota_id: str, request: pai_studio_20220112_models.UpdateQuotaLabelsRequest, ) -> pai_studio_20220112_models.UpdateQuotaLabelsResponse: + """ + @summary 更新Quota标签 + + @param request: UpdateQuotaLabelsRequest + @return: UpdateQuotaLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_quota_labels_with_options_async(quota_id, request, headers, runtime) @@ -7498,8 +12613,20 @@ def update_resource_group_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateResourceGroupResponse: + """ + @summary 更新Resource Group + + @param request: UpdateResourceGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateResourceGroupResponse + """ UtilClient.validate_model(request) body = {} + if not UtilClient.is_unset(request.description): + body['Description'] = request.description + if not UtilClient.is_unset(request.name): + body['Name'] = request.name if not UtilClient.is_unset(request.unbind): body['Unbind'] = request.unbind if not UtilClient.is_unset(request.user_vpc): @@ -7531,8 +12658,20 @@ async def update_resource_group_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateResourceGroupResponse: + """ + @summary 更新Resource Group + + @param request: UpdateResourceGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateResourceGroupResponse + """ UtilClient.validate_model(request) body = {} + if not UtilClient.is_unset(request.description): + body['Description'] = request.description + if not UtilClient.is_unset(request.name): + body['Name'] = request.name if not UtilClient.is_unset(request.unbind): body['Unbind'] = request.unbind if not UtilClient.is_unset(request.user_vpc): @@ -7562,6 +12701,12 @@ def 
update_resource_group( resource_group_id: str, request: pai_studio_20220112_models.UpdateResourceGroupRequest, ) -> pai_studio_20220112_models.UpdateResourceGroupResponse: + """ + @summary 更新Resource Group + + @param request: UpdateResourceGroupRequest + @return: UpdateResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_resource_group_with_options(resource_group_id, request, headers, runtime) @@ -7571,10 +12716,128 @@ async def update_resource_group_async( resource_group_id: str, request: pai_studio_20220112_models.UpdateResourceGroupRequest, ) -> pai_studio_20220112_models.UpdateResourceGroupResponse: + """ + @summary 更新Resource Group + + @param request: UpdateResourceGroupRequest + @return: UpdateResourceGroupResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_resource_group_with_options_async(resource_group_id, request, headers, runtime) + def update_resource_group_machine_group_with_options( + self, + resource_group_id: str, + machine_group_id: str, + request: pai_studio_20220112_models.UpdateResourceGroupMachineGroupRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.UpdateResourceGroupMachineGroupResponse: + """ + @summary 更新Machine Group + + @param request: UpdateResourceGroupMachineGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateResourceGroupMachineGroupResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateResourceGroupMachineGroup', + version='2022-01-12', + protocol='HTTPS', + 
pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/machinegroups/{OpenApiUtilClient.get_encode_param(machine_group_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.UpdateResourceGroupMachineGroupResponse(), + self.call_api(params, req, runtime) + ) + + async def update_resource_group_machine_group_with_options_async( + self, + resource_group_id: str, + machine_group_id: str, + request: pai_studio_20220112_models.UpdateResourceGroupMachineGroupRequest, + headers: Dict[str, str], + runtime: util_models.RuntimeOptions, + ) -> pai_studio_20220112_models.UpdateResourceGroupMachineGroupResponse: + """ + @summary 更新Machine Group + + @param request: UpdateResourceGroupMachineGroupRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateResourceGroupMachineGroupResponse + """ + UtilClient.validate_model(request) + body = {} + if not UtilClient.is_unset(request.name): + body['Name'] = request.name + req = open_api_models.OpenApiRequest( + headers=headers, + body=OpenApiUtilClient.parse_to_map(body) + ) + params = open_api_models.Params( + action='UpdateResourceGroupMachineGroup', + version='2022-01-12', + protocol='HTTPS', + pathname=f'/api/v1/resources/{OpenApiUtilClient.get_encode_param(resource_group_id)}/machinegroups/{OpenApiUtilClient.get_encode_param(machine_group_id)}', + method='PUT', + auth_type='AK', + style='ROA', + req_body_type='json', + body_type='json' + ) + return TeaCore.from_map( + pai_studio_20220112_models.UpdateResourceGroupMachineGroupResponse(), + await self.call_api_async(params, req, runtime) + ) + + def update_resource_group_machine_group( + self, + resource_group_id: str, + machine_group_id: str, + request: pai_studio_20220112_models.UpdateResourceGroupMachineGroupRequest, + ) -> pai_studio_20220112_models.UpdateResourceGroupMachineGroupResponse: + 
""" + @summary 更新Machine Group + + @param request: UpdateResourceGroupMachineGroupRequest + @return: UpdateResourceGroupMachineGroupResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return self.update_resource_group_machine_group_with_options(resource_group_id, machine_group_id, request, headers, runtime) + + async def update_resource_group_machine_group_async( + self, + resource_group_id: str, + machine_group_id: str, + request: pai_studio_20220112_models.UpdateResourceGroupMachineGroupRequest, + ) -> pai_studio_20220112_models.UpdateResourceGroupMachineGroupResponse: + """ + @summary 更新Machine Group + + @param request: UpdateResourceGroupMachineGroupRequest + @return: UpdateResourceGroupMachineGroupResponse + """ + runtime = util_models.RuntimeOptions() + headers = {} + return await self.update_resource_group_machine_group_with_options_async(resource_group_id, machine_group_id, request, headers, runtime) + def update_training_job_labels_with_options( self, training_job_id: str, @@ -7582,6 +12845,14 @@ def update_training_job_labels_with_options( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateTrainingJobLabelsResponse: + """ + @summary 更新一个TrainingJob的Labels + + @param request: UpdateTrainingJobLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateTrainingJobLabelsResponse + """ UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -7613,6 +12884,14 @@ async def update_training_job_labels_with_options_async( headers: Dict[str, str], runtime: util_models.RuntimeOptions, ) -> pai_studio_20220112_models.UpdateTrainingJobLabelsResponse: + """ + @summary 更新一个TrainingJob的Labels + + @param request: UpdateTrainingJobLabelsRequest + @param headers: map + @param runtime: runtime options for this request RuntimeOptions + @return: UpdateTrainingJobLabelsResponse + """ 
UtilClient.validate_model(request) body = {} if not UtilClient.is_unset(request.labels): @@ -7642,6 +12921,12 @@ def update_training_job_labels( training_job_id: str, request: pai_studio_20220112_models.UpdateTrainingJobLabelsRequest, ) -> pai_studio_20220112_models.UpdateTrainingJobLabelsResponse: + """ + @summary 更新一个TrainingJob的Labels + + @param request: UpdateTrainingJobLabelsRequest + @return: UpdateTrainingJobLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return self.update_training_job_labels_with_options(training_job_id, request, headers, runtime) @@ -7651,6 +12936,12 @@ async def update_training_job_labels_async( training_job_id: str, request: pai_studio_20220112_models.UpdateTrainingJobLabelsRequest, ) -> pai_studio_20220112_models.UpdateTrainingJobLabelsResponse: + """ + @summary 更新一个TrainingJob的Labels + + @param request: UpdateTrainingJobLabelsRequest + @return: UpdateTrainingJobLabelsResponse + """ runtime = util_models.RuntimeOptions() headers = {} return await self.update_training_job_labels_with_options_async(training_job_id, request, headers, runtime) diff --git a/pai/libs/alibabacloud_paistudio20220112/models.py b/pai/libs/alibabacloud_paistudio20220112/models.py index 5092ef3..81eda37 100644 --- a/pai/libs/alibabacloud_paistudio20220112/models.py +++ b/pai/libs/alibabacloud_paistudio20220112/models.py @@ -1,7 +1,40 @@ # -*- coding: utf-8 -*- # This file is auto-generated, don't edit it. Thanks. 
from Tea.model import TeaModel -from typing import Dict, Any, List +from typing import List, Dict, Any + + +class ACS(TeaModel): + def __init__( + self, + acsquota_id: str = None, + associated_products: List[str] = None, + ): + self.acsquota_id = acsquota_id + self.associated_products = associated_products + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.acsquota_id is not None: + result['ACSQuotaId'] = self.acsquota_id + if self.associated_products is not None: + result['AssociatedProducts'] = self.associated_products + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ACSQuotaId') is not None: + self.acsquota_id = m.get('ACSQuotaId') + if m.get('AssociatedProducts') is not None: + self.associated_products = m.get('AssociatedProducts') + return self class AlgorithmSpecComputeResourcePolicy(TeaModel): @@ -10,7 +43,9 @@ def __init__( value: str = None, version: str = None, ): + # This parameter is required. self.value = value + # This parameter is required. self.version = version def validate(self): @@ -42,6 +77,7 @@ def __init__( self, policy: AlgorithmSpecComputeResourcePolicy = None, ): + # This parameter is required. self.policy = policy def validate(self): @@ -313,9 +349,11 @@ def __init__( self.default_value = default_value self.description = description self.display_name = display_name + # This parameter is required. self.name = name self.range = range self.required = required + # This parameter is required. self.type = type def validate(self): @@ -374,6 +412,7 @@ def __init__( supported_channel_types: List[str] = None, ): self.description = description + # This parameter is required. self.name = name self.properties = properties self.required = required @@ -423,7 +462,9 @@ def __init__( regex: str = None, ): self.description = description + # This parameter is required. self.name = name + # This parameter is required. 
self.regex = regex def validate(self): @@ -461,8 +502,11 @@ def __init__( operator: str = None, values: List[str] = None, ): + # This parameter is required. self.key = key + # This parameter is required. self.operator = operator + # This parameter is required. self.values = values def validate(self): @@ -512,12 +556,15 @@ def __init__( supports_distributed_training: bool = None, ): self.code_dir = code_dir + # This parameter is required. self.command = command self.compute_resource = compute_resource self.customization = customization self.hyper_parameters = hyper_parameters + # This parameter is required. self.image = image self.input_channels = input_channels + # This parameter is required. self.job_type = job_type self.metric_definitions = metric_definitions self.output_channels = output_channels @@ -728,7 +775,9 @@ def __init__( name: str = None, value: str = None, ): + # This parameter is required. self.name = name + # This parameter is required. self.value = value def validate(self): @@ -769,10 +818,13 @@ def __init__( resource_requirements: List[ConditionExpression] = None, ): self.code_dir = code_dir + # This parameter is required. self.command = command self.hyper_parameters = hyper_parameters + # This parameter is required. self.image = image self.input_channels = input_channels + # This parameter is required. 
self.job_type = job_type self.metric_definitions = metric_definitions self.output_channels = output_channels @@ -966,6 +1018,51 @@ def from_map(self, m: dict = None): return self +class GPUMetric(TeaModel): + def __init__( + self, + index: int = None, + model: str = None, + status: int = None, + usage_rate: float = None, + ): + self.index = index + self.model = model + self.status = status + self.usage_rate = usage_rate + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.index is not None: + result['Index'] = self.index + if self.model is not None: + result['Model'] = self.model + if self.status is not None: + result['Status'] = self.status + if self.usage_rate is not None: + result['UsageRate'] = self.usage_rate + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Index') is not None: + self.index = m.get('Index') + if m.get('Model') is not None: + self.model = m.get('Model') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('UsageRate') is not None: + self.usage_rate = m.get('UsageRate') + return self + + class JobViewMetric(TeaModel): def __init__( self, @@ -1140,6 +1237,7 @@ def __init__( gmt_modified_time: str = None, gmt_started_time: str = None, machine_group_id: str = None, + order_instance_id: str = None, payment_duration: str = None, payment_duration_unit: str = None, payment_type: str = None, @@ -1158,6 +1256,7 @@ def __init__( self.gmt_modified_time = gmt_modified_time self.gmt_started_time = gmt_started_time self.machine_group_id = machine_group_id + self.order_instance_id = order_instance_id self.payment_duration = payment_duration self.payment_duration_unit = payment_duration_unit self.payment_type = payment_type @@ -1194,6 +1293,8 @@ def to_map(self): result['GmtStartedTime'] = self.gmt_started_time if self.machine_group_id is not None: result['MachineGroupID'] = self.machine_group_id + if 
self.order_instance_id is not None: + result['OrderInstanceId'] = self.order_instance_id if self.payment_duration is not None: result['PaymentDuration'] = self.payment_duration if self.payment_duration_unit is not None: @@ -1232,6 +1333,8 @@ def from_map(self, m: dict = None): self.gmt_started_time = m.get('GmtStartedTime') if m.get('MachineGroupID') is not None: self.machine_group_id = m.get('MachineGroupID') + if m.get('OrderInstanceId') is not None: + self.order_instance_id = m.get('OrderInstanceId') if m.get('PaymentDuration') is not None: self.payment_duration = m.get('PaymentDuration') if m.get('PaymentDurationUnit') is not None: @@ -1317,6 +1420,39 @@ def from_map(self, m: dict = None): return self +class UserInfo(TeaModel): + def __init__( + self, + user_id: str = None, + user_name: str = None, + ): + self.user_id = user_id + self.user_name = user_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_name is not None: + result['UserName'] = self.user_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') + return self + + class Node(TeaModel): def __init__( self, @@ -1330,16 +1466,25 @@ def __init__( gmt_expired_time: str = None, gmt_modified_time: str = None, is_bound: bool = None, + limit_cpu: str = None, + limit_gpu: str = None, + limit_memory: str = None, machine_group_id: str = None, memory: str = None, node_name: str = None, node_status: str = None, node_type: str = None, order_status: str = None, + pod_num: int = None, reason_code: str = None, reason_message: str = None, + request_cpu: str = None, + request_gpu: str = None, + request_memory: str = None, resource_group_id: str = None, resource_group_name: str = None, + users: 
List[UserInfo] = None, + workload_num: int = None, ): self.accelerator_type = accelerator_type self.bound_quotas = bound_quotas @@ -1351,22 +1496,35 @@ def __init__( self.gmt_expired_time = gmt_expired_time self.gmt_modified_time = gmt_modified_time self.is_bound = is_bound + self.limit_cpu = limit_cpu + self.limit_gpu = limit_gpu + self.limit_memory = limit_memory self.machine_group_id = machine_group_id self.memory = memory self.node_name = node_name self.node_status = node_status self.node_type = node_type self.order_status = order_status + self.pod_num = pod_num self.reason_code = reason_code self.reason_message = reason_message + self.request_cpu = request_cpu + self.request_gpu = request_gpu + self.request_memory = request_memory self.resource_group_id = resource_group_id self.resource_group_name = resource_group_name + self.users = users + self.workload_num = workload_num def validate(self): if self.bound_quotas: for k in self.bound_quotas: if k: k.validate() + if self.users: + for k in self.users: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -1396,6 +1554,12 @@ def to_map(self): result['GmtModifiedTime'] = self.gmt_modified_time if self.is_bound is not None: result['IsBound'] = self.is_bound + if self.limit_cpu is not None: + result['LimitCPU'] = self.limit_cpu + if self.limit_gpu is not None: + result['LimitGPU'] = self.limit_gpu + if self.limit_memory is not None: + result['LimitMemory'] = self.limit_memory if self.machine_group_id is not None: result['MachineGroupId'] = self.machine_group_id if self.memory is not None: @@ -1408,14 +1572,28 @@ def to_map(self): result['NodeType'] = self.node_type if self.order_status is not None: result['OrderStatus'] = self.order_status + if self.pod_num is not None: + result['PodNum'] = self.pod_num if self.reason_code is not None: result['ReasonCode'] = self.reason_code if self.reason_message is not None: result['ReasonMessage'] = self.reason_message + if self.request_cpu is not None: + 
result['RequestCPU'] = self.request_cpu + if self.request_gpu is not None: + result['RequestGPU'] = self.request_gpu + if self.request_memory is not None: + result['RequestMemory'] = self.request_memory if self.resource_group_id is not None: result['ResourceGroupId'] = self.resource_group_id if self.resource_group_name is not None: result['ResourceGroupName'] = self.resource_group_name + result['Users'] = [] + if self.users is not None: + for k in self.users: + result['Users'].append(k.to_map() if k else None) + if self.workload_num is not None: + result['WorkloadNum'] = self.workload_num return result def from_map(self, m: dict = None): @@ -1443,6 +1621,12 @@ def from_map(self, m: dict = None): self.gmt_modified_time = m.get('GmtModifiedTime') if m.get('IsBound') is not None: self.is_bound = m.get('IsBound') + if m.get('LimitCPU') is not None: + self.limit_cpu = m.get('LimitCPU') + if m.get('LimitGPU') is not None: + self.limit_gpu = m.get('LimitGPU') + if m.get('LimitMemory') is not None: + self.limit_memory = m.get('LimitMemory') if m.get('MachineGroupId') is not None: self.machine_group_id = m.get('MachineGroupId') if m.get('Memory') is not None: @@ -1455,14 +1639,112 @@ def from_map(self, m: dict = None): self.node_type = m.get('NodeType') if m.get('OrderStatus') is not None: self.order_status = m.get('OrderStatus') + if m.get('PodNum') is not None: + self.pod_num = m.get('PodNum') if m.get('ReasonCode') is not None: self.reason_code = m.get('ReasonCode') if m.get('ReasonMessage') is not None: self.reason_message = m.get('ReasonMessage') + if m.get('RequestCPU') is not None: + self.request_cpu = m.get('RequestCPU') + if m.get('RequestGPU') is not None: + self.request_gpu = m.get('RequestGPU') + if m.get('RequestMemory') is not None: + self.request_memory = m.get('RequestMemory') if m.get('ResourceGroupId') is not None: self.resource_group_id = m.get('ResourceGroupId') if m.get('ResourceGroupName') is not None: self.resource_group_name = 
m.get('ResourceGroupName') + self.users = [] + if m.get('Users') is not None: + for k in m.get('Users'): + temp_model = UserInfo() + self.users.append(temp_model.from_map(k)) + if m.get('WorkloadNum') is not None: + self.workload_num = m.get('WorkloadNum') + return self + + +class NodeGPUMetric(TeaModel): + def __init__( + self, + accelerator_type: str = None, + gpucount: int = None, + gpumetrics: List[GPUMetric] = None, + gputype: str = None, + memory_util: float = None, + node_id: str = None, + node_type: str = None, + total_memory: float = None, + used_memory: float = None, + ): + self.accelerator_type = accelerator_type + self.gpucount = gpucount + self.gpumetrics = gpumetrics + self.gputype = gputype + self.memory_util = memory_util + self.node_id = node_id + self.node_type = node_type + self.total_memory = total_memory + self.used_memory = used_memory + + def validate(self): + if self.gpumetrics: + for k in self.gpumetrics: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.accelerator_type is not None: + result['AcceleratorType'] = self.accelerator_type + if self.gpucount is not None: + result['GPUCount'] = self.gpucount + result['GPUMetrics'] = [] + if self.gpumetrics is not None: + for k in self.gpumetrics: + result['GPUMetrics'].append(k.to_map() if k else None) + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.memory_util is not None: + result['MemoryUtil'] = self.memory_util + if self.node_id is not None: + result['NodeId'] = self.node_id + if self.node_type is not None: + result['NodeType'] = self.node_type + if self.total_memory is not None: + result['TotalMemory'] = self.total_memory + if self.used_memory is not None: + result['UsedMemory'] = self.used_memory + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AcceleratorType') is not None: + self.accelerator_type = m.get('AcceleratorType') + if 
m.get('GPUCount') is not None: + self.gpucount = m.get('GPUCount') + self.gpumetrics = [] + if m.get('GPUMetrics') is not None: + for k in m.get('GPUMetrics'): + temp_model = GPUMetric() + self.gpumetrics.append(temp_model.from_map(k)) + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('MemoryUtil') is not None: + self.memory_util = m.get('MemoryUtil') + if m.get('NodeId') is not None: + self.node_id = m.get('NodeId') + if m.get('NodeType') is not None: + self.node_type = m.get('NodeType') + if m.get('TotalMemory') is not None: + self.total_memory = m.get('TotalMemory') + if m.get('UsedMemory') is not None: + self.used_memory = m.get('UsedMemory') return self @@ -1513,6 +1795,116 @@ def from_map(self, m: dict = None): return self +class ResourceAmount(TeaModel): + def __init__( + self, + cpu: str = None, + gpu: str = None, + gputype: str = None, + memory: str = None, + ): + self.cpu = cpu + self.gpu = gpu + self.gputype = gputype + self.memory = memory + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.cpu is not None: + result['CPU'] = self.cpu + if self.gpu is not None: + result['GPU'] = self.gpu + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.memory is not None: + result['Memory'] = self.memory + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CPU') is not None: + self.cpu = m.get('CPU') + if m.get('GPU') is not None: + self.gpu = m.get('GPU') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('Memory') is not None: + self.memory = m.get('Memory') + return self + + +class NodePodInfo(TeaModel): + def __init__( + self, + phase: str = None, + pod_ip: str = None, + pod_name: str = None, + pod_namespace: str = None, + resource_spec: ResourceAmount = None, + workload_id: str = None, + workload_type: str = None, + ): + self.phase = phase + self.pod_ip = 
pod_ip + self.pod_name = pod_name + self.pod_namespace = pod_namespace + self.resource_spec = resource_spec + self.workload_id = workload_id + self.workload_type = workload_type + + def validate(self): + if self.resource_spec: + self.resource_spec.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.phase is not None: + result['Phase'] = self.phase + if self.pod_ip is not None: + result['PodIP'] = self.pod_ip + if self.pod_name is not None: + result['PodName'] = self.pod_name + if self.pod_namespace is not None: + result['PodNamespace'] = self.pod_namespace + if self.resource_spec is not None: + result['ResourceSpec'] = self.resource_spec.to_map() + if self.workload_id is not None: + result['WorkloadId'] = self.workload_id + if self.workload_type is not None: + result['WorkloadType'] = self.workload_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Phase') is not None: + self.phase = m.get('Phase') + if m.get('PodIP') is not None: + self.pod_ip = m.get('PodIP') + if m.get('PodName') is not None: + self.pod_name = m.get('PodName') + if m.get('PodNamespace') is not None: + self.pod_namespace = m.get('PodNamespace') + if m.get('ResourceSpec') is not None: + temp_model = ResourceAmount() + self.resource_spec = temp_model.from_map(m['ResourceSpec']) + if m.get('WorkloadId') is not None: + self.workload_id = m.get('WorkloadId') + if m.get('WorkloadType') is not None: + self.workload_type = m.get('WorkloadType') + return self + + class NodeType(TeaModel): def __init__( self, @@ -1795,25 +2187,56 @@ def from_map(self, m: dict = None): return self -class UserVpc(TeaModel): +class QueueInfo(TeaModel): def __init__( self, - default_route: str = None, - extended_cidrs: List[str] = None, - role_arn: str = None, - security_group_id: str = None, - switch_id: str = None, - vpc_id: str = None, + code: str = None, + code_type: str = None, + gmt_created_time: str = None, 
+ gmt_dequeued_time: str = None, + gmt_enqueued_time: str = None, + gmt_position_modified_time: str = None, + name: str = None, + position: int = None, + priority: int = None, + queue_strategy: str = None, + quota_id: str = None, + reason: str = None, + resource: ResourceAmount = None, + status: str = None, + sub_status: str = None, + user_id: str = None, + user_name: str = None, + workload_id: str = None, + workload_name: str = None, + workload_type: str = None, + workspace_id: str = None, ): - self.default_route = default_route - self.extended_cidrs = extended_cidrs - self.role_arn = role_arn - self.security_group_id = security_group_id - self.switch_id = switch_id - self.vpc_id = vpc_id + self.code = code + self.code_type = code_type + self.gmt_created_time = gmt_created_time + self.gmt_dequeued_time = gmt_dequeued_time + self.gmt_enqueued_time = gmt_enqueued_time + self.gmt_position_modified_time = gmt_position_modified_time + self.name = name + self.position = position + self.priority = priority + self.queue_strategy = queue_strategy + self.quota_id = quota_id + self.reason = reason + self.resource = resource + self.status = status + self.sub_status = sub_status + self.user_id = user_id + self.user_name = user_name + self.workload_id = workload_id + self.workload_name = workload_name + self.workload_type = workload_type + self.workspace_id = workspace_id def validate(self): - pass + if self.resource: + self.resource.validate() def to_map(self): _map = super().to_map() @@ -1821,25 +2244,290 @@ def to_map(self): return _map result = dict() - if self.default_route is not None: - result['DefaultRoute'] = self.default_route - if self.extended_cidrs is not None: - result['ExtendedCIDRs'] = self.extended_cidrs - if self.role_arn is not None: - result['RoleArn'] = self.role_arn - if self.security_group_id is not None: - result['SecurityGroupId'] = self.security_group_id - if self.switch_id is not None: - result['SwitchId'] = self.switch_id - if self.vpc_id is not 
None: - result['VpcId'] = self.vpc_id + if self.code is not None: + result['Code'] = self.code + if self.code_type is not None: + result['CodeType'] = self.code_type + if self.gmt_created_time is not None: + result['GmtCreatedTime'] = self.gmt_created_time + if self.gmt_dequeued_time is not None: + result['GmtDequeuedTime'] = self.gmt_dequeued_time + if self.gmt_enqueued_time is not None: + result['GmtEnqueuedTime'] = self.gmt_enqueued_time + if self.gmt_position_modified_time is not None: + result['GmtPositionModifiedTime'] = self.gmt_position_modified_time + if self.name is not None: + result['Name'] = self.name + if self.position is not None: + result['Position'] = self.position + if self.priority is not None: + result['Priority'] = self.priority + if self.queue_strategy is not None: + result['QueueStrategy'] = self.queue_strategy + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.reason is not None: + result['Reason'] = self.reason + if self.resource is not None: + result['Resource'] = self.resource.to_map() + if self.status is not None: + result['Status'] = self.status + if self.sub_status is not None: + result['SubStatus'] = self.sub_status + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_name is not None: + result['UserName'] = self.user_name + if self.workload_id is not None: + result['WorkloadId'] = self.workload_id + if self.workload_name is not None: + result['WorkloadName'] = self.workload_name + if self.workload_type is not None: + result['WorkloadType'] = self.workload_type + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DefaultRoute') is not None: - self.default_route = m.get('DefaultRoute') - if m.get('ExtendedCIDRs') is not None: + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('CodeType') is not None: + self.code_type = m.get('CodeType') + if 
m.get('GmtCreatedTime') is not None: + self.gmt_created_time = m.get('GmtCreatedTime') + if m.get('GmtDequeuedTime') is not None: + self.gmt_dequeued_time = m.get('GmtDequeuedTime') + if m.get('GmtEnqueuedTime') is not None: + self.gmt_enqueued_time = m.get('GmtEnqueuedTime') + if m.get('GmtPositionModifiedTime') is not None: + self.gmt_position_modified_time = m.get('GmtPositionModifiedTime') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Position') is not None: + self.position = m.get('Position') + if m.get('Priority') is not None: + self.priority = m.get('Priority') + if m.get('QueueStrategy') is not None: + self.queue_strategy = m.get('QueueStrategy') + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('Reason') is not None: + self.reason = m.get('Reason') + if m.get('Resource') is not None: + temp_model = ResourceAmount() + self.resource = temp_model.from_map(m['Resource']) + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('SubStatus') is not None: + self.sub_status = m.get('SubStatus') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserName') is not None: + self.user_name = m.get('UserName') + if m.get('WorkloadId') is not None: + self.workload_id = m.get('WorkloadId') + if m.get('WorkloadName') is not None: + self.workload_name = m.get('WorkloadName') + if m.get('WorkloadType') is not None: + self.workload_type = m.get('WorkloadType') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class ResourceSpec(TeaModel): + def __init__( + self, + node_specs: List[NodeSpec] = None, + ): + self.node_specs = node_specs + + def validate(self): + if self.node_specs: + for k in self.node_specs: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['NodeSpecs'] = [] + if self.node_specs is not None: + for k in self.node_specs: + 
result['NodeSpecs'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.node_specs = [] + if m.get('NodeSpecs') is not None: + for k in m.get('NodeSpecs'): + temp_model = NodeSpec() + self.node_specs.append(temp_model.from_map(k)) + return self + + +class WorkspaceSpec(TeaModel): + def __init__( + self, + code: str = None, + code_type: str = None, + is_guaranteed_valid: bool = None, + is_over_sold_valid: bool = None, + reason: str = None, + spec: ResourceAmount = None, + spec_name: str = None, + ): + self.code = code + self.code_type = code_type + self.is_guaranteed_valid = is_guaranteed_valid + self.is_over_sold_valid = is_over_sold_valid + self.reason = reason + self.spec = spec + self.spec_name = spec_name + + def validate(self): + if self.spec: + self.spec.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.code is not None: + result['Code'] = self.code + if self.code_type is not None: + result['CodeType'] = self.code_type + if self.is_guaranteed_valid is not None: + result['IsGuaranteedValid'] = self.is_guaranteed_valid + if self.is_over_sold_valid is not None: + result['IsOverSoldValid'] = self.is_over_sold_valid + if self.reason is not None: + result['Reason'] = self.reason + if self.spec is not None: + result['Spec'] = self.spec.to_map() + if self.spec_name is not None: + result['SpecName'] = self.spec_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('CodeType') is not None: + self.code_type = m.get('CodeType') + if m.get('IsGuaranteedValid') is not None: + self.is_guaranteed_valid = m.get('IsGuaranteedValid') + if m.get('IsOverSoldValid') is not None: + self.is_over_sold_valid = m.get('IsOverSoldValid') + if m.get('Reason') is not None: + self.reason = m.get('Reason') + if m.get('Spec') is not None: + temp_model = 
ResourceAmount() + self.spec = temp_model.from_map(m['Spec']) + if m.get('SpecName') is not None: + self.spec_name = m.get('SpecName') + return self + + +class WorkspaceSpecs(TeaModel): + def __init__( + self, + product: str = None, + specs: List[WorkspaceSpec] = None, + workspace_id: str = None, + ): + self.product = product + self.specs = specs + self.workspace_id = workspace_id + + def validate(self): + if self.specs: + for k in self.specs: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.product is not None: + result['Product'] = self.product + result['Specs'] = [] + if self.specs is not None: + for k in self.specs: + result['Specs'].append(k.to_map() if k else None) + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Product') is not None: + self.product = m.get('Product') + self.specs = [] + if m.get('Specs') is not None: + for k in m.get('Specs'): + temp_model = WorkspaceSpec() + self.specs.append(temp_model.from_map(k)) + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class UserVpc(TeaModel): + def __init__( + self, + default_route: str = None, + extended_cidrs: List[str] = None, + role_arn: str = None, + security_group_id: str = None, + switch_id: str = None, + vpc_id: str = None, + ): + self.default_route = default_route + self.extended_cidrs = extended_cidrs + self.role_arn = role_arn + self.security_group_id = security_group_id + self.switch_id = switch_id + self.vpc_id = vpc_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.default_route is not None: + result['DefaultRoute'] = self.default_route + if self.extended_cidrs is not None: + result['ExtendedCIDRs'] = self.extended_cidrs + if self.role_arn is not 
None: + result['RoleArn'] = self.role_arn + if self.security_group_id is not None: + result['SecurityGroupId'] = self.security_group_id + if self.switch_id is not None: + result['SwitchId'] = self.switch_id + if self.vpc_id is not None: + result['VpcId'] = self.vpc_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DefaultRoute') is not None: + self.default_route = m.get('DefaultRoute') + if m.get('ExtendedCIDRs') is not None: self.extended_cidrs = m.get('ExtendedCIDRs') if m.get('RoleArn') is not None: self.role_arn = m.get('RoleArn') @@ -1855,19 +2543,31 @@ def from_map(self, m: dict = None): class QuotaConfig(TeaModel): def __init__( self, + acs: ACS = None, cluster_id: str = None, default_gpudriver: str = None, + enable_preempt_subquota_workloads: bool = None, + resource_specs: List[WorkspaceSpecs] = None, support_gpudrivers: List[str] = None, support_rdma: bool = None, user_vpc: UserVpc = None, ): + self.acs = acs self.cluster_id = cluster_id self.default_gpudriver = default_gpudriver + self.enable_preempt_subquota_workloads = enable_preempt_subquota_workloads + self.resource_specs = resource_specs self.support_gpudrivers = support_gpudrivers self.support_rdma = support_rdma self.user_vpc = user_vpc def validate(self): + if self.acs: + self.acs.validate() + if self.resource_specs: + for k in self.resource_specs: + if k: + k.validate() if self.user_vpc: self.user_vpc.validate() @@ -1877,10 +2577,18 @@ def to_map(self): return _map result = dict() + if self.acs is not None: + result['ACS'] = self.acs.to_map() if self.cluster_id is not None: result['ClusterId'] = self.cluster_id if self.default_gpudriver is not None: result['DefaultGPUDriver'] = self.default_gpudriver + if self.enable_preempt_subquota_workloads is not None: + result['EnablePreemptSubquotaWorkloads'] = self.enable_preempt_subquota_workloads + result['ResourceSpecs'] = [] + if self.resource_specs is not None: + for k in self.resource_specs: + 
result['ResourceSpecs'].append(k.to_map() if k else None) if self.support_gpudrivers is not None: result['SupportGPUDrivers'] = self.support_gpudrivers if self.support_rdma is not None: @@ -1891,10 +2599,20 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() + if m.get('ACS') is not None: + temp_model = ACS() + self.acs = temp_model.from_map(m['ACS']) if m.get('ClusterId') is not None: self.cluster_id = m.get('ClusterId') if m.get('DefaultGPUDriver') is not None: self.default_gpudriver = m.get('DefaultGPUDriver') + if m.get('EnablePreemptSubquotaWorkloads') is not None: + self.enable_preempt_subquota_workloads = m.get('EnablePreemptSubquotaWorkloads') + self.resource_specs = [] + if m.get('ResourceSpecs') is not None: + for k in m.get('ResourceSpecs'): + temp_model = WorkspaceSpecs() + self.resource_specs.append(temp_model.from_map(k)) if m.get('SupportGPUDrivers') is not None: self.support_gpudrivers = m.get('SupportGPUDrivers') if m.get('SupportRDMA') is not None: @@ -1905,51 +2623,6 @@ def from_map(self, m: dict = None): return self -class ResourceAmount(TeaModel): - def __init__( - self, - cpu: str = None, - gpu: str = None, - gputype: str = None, - memory: str = None, - ): - self.cpu = cpu - self.gpu = gpu - self.gputype = gputype - self.memory = memory - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.cpu is not None: - result['CPU'] = self.cpu - if self.gpu is not None: - result['GPU'] = self.gpu - if self.gputype is not None: - result['GPUType'] = self.gputype - if self.memory is not None: - result['Memory'] = self.memory - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('CPU') is not None: - self.cpu = m.get('CPU') - if m.get('GPU') is not None: - self.gpu = m.get('GPU') - if m.get('GPUType') is not None: - self.gputype = m.get('GPUType') - if m.get('Memory') is not None: - self.memory = m.get('Memory') - 
return self - - class QuotaDetails(TeaModel): def __init__( self, @@ -2043,8 +2716,9 @@ def __init__( gmt_modified_time: str = None, labels: List[Label] = None, latest_operation_id: str = None, - min: AllocateStrategySpec = None, + min: ResourceSpec = None, parent_quota_id: str = None, + queue_strategy: str = None, quota_config: QuotaConfig = None, quota_details: QuotaDetails = None, quota_id: str = None, @@ -2066,6 +2740,7 @@ def __init__( self.latest_operation_id = latest_operation_id self.min = min self.parent_quota_id = parent_quota_id + self.queue_strategy = queue_strategy self.quota_config = quota_config self.quota_details = quota_details self.quota_id = quota_id @@ -2124,6 +2799,8 @@ def to_map(self): result['Min'] = self.min.to_map() if self.parent_quota_id is not None: result['ParentQuotaId'] = self.parent_quota_id + if self.queue_strategy is not None: + result['QueueStrategy'] = self.queue_strategy if self.quota_config is not None: result['QuotaConfig'] = self.quota_config.to_map() if self.quota_details is not None: @@ -2172,10 +2849,12 @@ def from_map(self, m: dict = None): if m.get('LatestOperationId') is not None: self.latest_operation_id = m.get('LatestOperationId') if m.get('Min') is not None: - temp_model = AllocateStrategySpec() + temp_model = ResourceSpec() self.min = temp_model.from_map(m['Min']) if m.get('ParentQuotaId') is not None: self.parent_quota_id = m.get('ParentQuotaId') + if m.get('QueueStrategy') is not None: + self.queue_strategy = m.get('QueueStrategy') if m.get('QuotaConfig') is not None: temp_model = QuotaConfig() self.quota_config = temp_model.from_map(m['QuotaConfig']) @@ -2209,6 +2888,45 @@ def from_map(self, m: dict = None): return self +class QuotaJob(TeaModel): + def __init__( + self, + queuing: int = None, + running: int = None, + total: int = None, + ): + self.queuing = queuing + self.running = running + self.total = total + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: 
+ return _map + + result = dict() + if self.queuing is not None: + result['Queuing'] = self.queuing + if self.running is not None: + result['Running'] = self.running + if self.total is not None: + result['Total'] = self.total + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Queuing') is not None: + self.queuing = m.get('Queuing') + if m.get('Running') is not None: + self.running = m.get('Running') + if m.get('Total') is not None: + self.total = m.get('Total') + return self + + class QuotaJobViewMetric(TeaModel): def __init__( self, @@ -2373,54 +3091,54 @@ def from_map(self, m: dict = None): return self -class QuotaUserViewMetric(TeaModel): +class QuotaNodeViewMetric(TeaModel): def __init__( self, - cpunode_number: int = None, cpuusage_rate: str = None, - cpu_job_names: List[str] = None, - cpu_node_names: List[str] = None, + created_time: str = None, disk_read_rate: str = None, disk_write_rate: str = None, - gpunode_number: int = None, - gpuusage_rate: str = None, - gpu_job_names: List[str] = None, - gpu_node_names: List[str] = None, - job_type: str = None, + gputype: str = None, memory_usage_rate: str = None, network_input_rate: str = None, network_output_rate: str = None, - node_names: List[str] = None, + node_id: str = None, + node_status: str = None, + node_type: str = None, + quota_id: str = None, request_cpu: int = None, request_gpu: int = None, request_memory: int = None, + task_id_map: Dict[str, Any] = None, total_cpu: int = None, total_gpu: int = None, total_memory: int = None, - user_id: str = None, + total_tasks: int = None, + user_ids: List[str] = None, + user_number: str = None, ): - self.cpunode_number = cpunode_number self.cpuusage_rate = cpuusage_rate - self.cpu_job_names = cpu_job_names - self.cpu_node_names = cpu_node_names + self.created_time = created_time self.disk_read_rate = disk_read_rate self.disk_write_rate = disk_write_rate - self.gpunode_number = gpunode_number - self.gpuusage_rate = gpuusage_rate - 
self.gpu_job_names = gpu_job_names - self.gpu_node_names = gpu_node_names - self.job_type = job_type + self.gputype = gputype self.memory_usage_rate = memory_usage_rate self.network_input_rate = network_input_rate self.network_output_rate = network_output_rate - self.node_names = node_names - self.request_cpu = request_cpu - self.request_gpu = request_gpu + self.node_id = node_id + self.node_status = node_status + self.node_type = node_type + self.quota_id = quota_id + self.request_cpu = request_cpu + self.request_gpu = request_gpu self.request_memory = request_memory + self.task_id_map = task_id_map self.total_cpu = total_cpu self.total_gpu = total_gpu self.total_memory = total_memory - self.user_id = user_id + self.total_tasks = total_tasks + self.user_ids = user_ids + self.user_number = user_number def validate(self): pass @@ -2431,125 +3149,123 @@ def to_map(self): return _map result = dict() - if self.cpunode_number is not None: - result['CPUNodeNumber'] = self.cpunode_number if self.cpuusage_rate is not None: result['CPUUsageRate'] = self.cpuusage_rate - if self.cpu_job_names is not None: - result['CpuJobNames'] = self.cpu_job_names - if self.cpu_node_names is not None: - result['CpuNodeNames'] = self.cpu_node_names + if self.created_time is not None: + result['CreatedTime'] = self.created_time if self.disk_read_rate is not None: result['DiskReadRate'] = self.disk_read_rate if self.disk_write_rate is not None: result['DiskWriteRate'] = self.disk_write_rate - if self.gpunode_number is not None: - result['GPUNodeNumber'] = self.gpunode_number - if self.gpuusage_rate is not None: - result['GPUUsageRate'] = self.gpuusage_rate - if self.gpu_job_names is not None: - result['GpuJobNames'] = self.gpu_job_names - if self.gpu_node_names is not None: - result['GpuNodeNames'] = self.gpu_node_names - if self.job_type is not None: - result['JobType'] = self.job_type + if self.gputype is not None: + result['GPUType'] = self.gputype if self.memory_usage_rate is not None: 
result['MemoryUsageRate'] = self.memory_usage_rate if self.network_input_rate is not None: result['NetworkInputRate'] = self.network_input_rate if self.network_output_rate is not None: result['NetworkOutputRate'] = self.network_output_rate - if self.node_names is not None: - result['NodeNames'] = self.node_names + if self.node_id is not None: + result['NodeID'] = self.node_id + if self.node_status is not None: + result['NodeStatus'] = self.node_status + if self.node_type is not None: + result['NodeType'] = self.node_type + if self.quota_id is not None: + result['QuotaId'] = self.quota_id if self.request_cpu is not None: result['RequestCPU'] = self.request_cpu if self.request_gpu is not None: result['RequestGPU'] = self.request_gpu if self.request_memory is not None: result['RequestMemory'] = self.request_memory + if self.task_id_map is not None: + result['TaskIdMap'] = self.task_id_map if self.total_cpu is not None: result['TotalCPU'] = self.total_cpu if self.total_gpu is not None: result['TotalGPU'] = self.total_gpu if self.total_memory is not None: result['TotalMemory'] = self.total_memory - if self.user_id is not None: - result['UserId'] = self.user_id + if self.total_tasks is not None: + result['TotalTasks'] = self.total_tasks + if self.user_ids is not None: + result['UserIDs'] = self.user_ids + if self.user_number is not None: + result['UserNumber'] = self.user_number return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CPUNodeNumber') is not None: - self.cpunode_number = m.get('CPUNodeNumber') if m.get('CPUUsageRate') is not None: self.cpuusage_rate = m.get('CPUUsageRate') - if m.get('CpuJobNames') is not None: - self.cpu_job_names = m.get('CpuJobNames') - if m.get('CpuNodeNames') is not None: - self.cpu_node_names = m.get('CpuNodeNames') + if m.get('CreatedTime') is not None: + self.created_time = m.get('CreatedTime') if m.get('DiskReadRate') is not None: self.disk_read_rate = m.get('DiskReadRate') if m.get('DiskWriteRate') is not 
None: self.disk_write_rate = m.get('DiskWriteRate') - if m.get('GPUNodeNumber') is not None: - self.gpunode_number = m.get('GPUNodeNumber') - if m.get('GPUUsageRate') is not None: - self.gpuusage_rate = m.get('GPUUsageRate') - if m.get('GpuJobNames') is not None: - self.gpu_job_names = m.get('GpuJobNames') - if m.get('GpuNodeNames') is not None: - self.gpu_node_names = m.get('GpuNodeNames') - if m.get('JobType') is not None: - self.job_type = m.get('JobType') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') if m.get('MemoryUsageRate') is not None: self.memory_usage_rate = m.get('MemoryUsageRate') if m.get('NetworkInputRate') is not None: self.network_input_rate = m.get('NetworkInputRate') if m.get('NetworkOutputRate') is not None: self.network_output_rate = m.get('NetworkOutputRate') - if m.get('NodeNames') is not None: - self.node_names = m.get('NodeNames') + if m.get('NodeID') is not None: + self.node_id = m.get('NodeID') + if m.get('NodeStatus') is not None: + self.node_status = m.get('NodeStatus') + if m.get('NodeType') is not None: + self.node_type = m.get('NodeType') + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') if m.get('RequestCPU') is not None: self.request_cpu = m.get('RequestCPU') if m.get('RequestGPU') is not None: self.request_gpu = m.get('RequestGPU') if m.get('RequestMemory') is not None: self.request_memory = m.get('RequestMemory') + if m.get('TaskIdMap') is not None: + self.task_id_map = m.get('TaskIdMap') if m.get('TotalCPU') is not None: self.total_cpu = m.get('TotalCPU') if m.get('TotalGPU') is not None: self.total_gpu = m.get('TotalGPU') if m.get('TotalMemory') is not None: self.total_memory = m.get('TotalMemory') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') + if m.get('TotalTasks') is not None: + self.total_tasks = m.get('TotalTasks') + if m.get('UserIDs') is not None: + self.user_ids = m.get('UserIDs') + if m.get('UserNumber') is not None: + self.user_number = 
m.get('UserNumber') return self -class ResourceGroup(TeaModel): +class WorkloadDetails(TeaModel): def __init__( self, - creator_id: str = None, - gmt_created_time: str = None, - gmt_modified_time: str = None, - name: str = None, - node_count: int = None, - resource_group_id: str = None, - user_vpc: UserVpc = None, - workspace_id: str = None, + dlc: QuotaJob = None, + dsw: QuotaJob = None, + eas: QuotaJob = None, + summary: QuotaJob = None, ): - self.creator_id = creator_id - self.gmt_created_time = gmt_created_time - self.gmt_modified_time = gmt_modified_time - self.name = name - self.node_count = node_count - self.resource_group_id = resource_group_id - self.user_vpc = user_vpc - self.workspace_id = workspace_id + self.dlc = dlc + self.dsw = dsw + self.eas = eas + self.summary = summary def validate(self): - if self.user_vpc: - self.user_vpc.validate() + if self.dlc: + self.dlc.validate() + if self.dsw: + self.dsw.validate() + if self.eas: + self.eas.validate() + if self.summary: + self.summary.validate() def to_map(self): _map = super().to_map() @@ -2557,62 +3273,57 @@ def to_map(self): return _map result = dict() - if self.creator_id is not None: - result['CreatorID'] = self.creator_id - if self.gmt_created_time is not None: - result['GmtCreatedTime'] = self.gmt_created_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.name is not None: - result['Name'] = self.name - if self.node_count is not None: - result['NodeCount'] = self.node_count - if self.resource_group_id is not None: - result['ResourceGroupID'] = self.resource_group_id - if self.user_vpc is not None: - result['UserVpc'] = self.user_vpc.to_map() - if self.workspace_id is not None: - result['WorkspaceID'] = self.workspace_id + if self.dlc is not None: + result['DLC'] = self.dlc.to_map() + if self.dsw is not None: + result['DSW'] = self.dsw.to_map() + if self.eas is not None: + result['EAS'] = self.eas.to_map() + if self.summary is not None: + 
result['Summary'] = self.summary.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CreatorID') is not None: - self.creator_id = m.get('CreatorID') - if m.get('GmtCreatedTime') is not None: - self.gmt_created_time = m.get('GmtCreatedTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('NodeCount') is not None: - self.node_count = m.get('NodeCount') - if m.get('ResourceGroupID') is not None: - self.resource_group_id = m.get('ResourceGroupID') - if m.get('UserVpc') is not None: - temp_model = UserVpc() - self.user_vpc = temp_model.from_map(m['UserVpc']) - if m.get('WorkspaceID') is not None: - self.workspace_id = m.get('WorkspaceID') + if m.get('DLC') is not None: + temp_model = QuotaJob() + self.dlc = temp_model.from_map(m['DLC']) + if m.get('DSW') is not None: + temp_model = QuotaJob() + self.dsw = temp_model.from_map(m['DSW']) + if m.get('EAS') is not None: + temp_model = QuotaJob() + self.eas = temp_model.from_map(m['EAS']) + if m.get('Summary') is not None: + temp_model = QuotaJob() + self.summary = temp_model.from_map(m['Summary']) return self -class ResourceGroupMetric(TeaModel): +class QuotaTopo(TeaModel): def __init__( self, - gpu_type: str = None, - metrics: List[Metric] = None, - resource_group_id: str = None, + depth: str = None, + parent_quota_id: str = None, + quota_details: QuotaDetails = None, + quota_id: str = None, + quota_name: str = None, + resource_type: str = None, + workload_details: WorkloadDetails = None, ): - self.gpu_type = gpu_type - self.metrics = metrics - self.resource_group_id = resource_group_id + self.depth = depth + self.parent_quota_id = parent_quota_id + self.quota_details = quota_details + self.quota_id = quota_id + self.quota_name = quota_name + self.resource_type = resource_type + self.workload_details = workload_details def validate(self): - if self.metrics: - for k in 
self.metrics: - if k: - k.validate() + if self.quota_details: + self.quota_details.validate() + if self.workload_details: + self.workload_details.validate() def to_map(self): _map = super().to_map() @@ -2620,65 +3331,57 @@ def to_map(self): return _map result = dict() - if self.gpu_type is not None: - result['GpuType'] = self.gpu_type - result['Metrics'] = [] - if self.metrics is not None: - for k in self.metrics: - result['Metrics'].append(k.to_map() if k else None) - if self.resource_group_id is not None: - result['ResourceGroupID'] = self.resource_group_id + if self.depth is not None: + result['Depth'] = self.depth + if self.parent_quota_id is not None: + result['ParentQuotaId'] = self.parent_quota_id + if self.quota_details is not None: + result['QuotaDetails'] = self.quota_details.to_map() + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.quota_name is not None: + result['QuotaName'] = self.quota_name + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + if self.workload_details is not None: + result['WorkloadDetails'] = self.workload_details.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('GpuType') is not None: - self.gpu_type = m.get('GpuType') - self.metrics = [] - if m.get('Metrics') is not None: - for k in m.get('Metrics'): - temp_model = Metric() - self.metrics.append(temp_model.from_map(k)) - if m.get('ResourceGroupID') is not None: - self.resource_group_id = m.get('ResourceGroupID') + if m.get('Depth') is not None: + self.depth = m.get('Depth') + if m.get('ParentQuotaId') is not None: + self.parent_quota_id = m.get('ParentQuotaId') + if m.get('QuotaDetails') is not None: + temp_model = QuotaDetails() + self.quota_details = temp_model.from_map(m['QuotaDetails']) + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('QuotaName') is not None: + self.quota_name = m.get('QuotaName') + if m.get('ResourceType') is not None: + 
self.resource_type = m.get('ResourceType') + if m.get('WorkloadDetails') is not None: + temp_model = WorkloadDetails() + self.workload_details = temp_model.from_map(m['WorkloadDetails']) return self -class ResourceOperation(TeaModel): +class QuotaUserResources(TeaModel): def __init__( self, - creator_id: str = None, - gmt_created_time: str = None, - gmt_end_time: str = None, - gmt_modified_time: str = None, - gmt_start_time: str = None, - object_id: str = None, - object_type: str = None, - operation_description: str = None, - operation_id: str = None, - operation_spec_json: str = None, - operation_type: str = None, - reason_code: str = None, - reason_message: str = None, - status: str = None, + submitted: ResourceAmount = None, + used: ResourceAmount = None, ): - self.creator_id = creator_id - self.gmt_created_time = gmt_created_time - self.gmt_end_time = gmt_end_time - self.gmt_modified_time = gmt_modified_time - self.gmt_start_time = gmt_start_time - self.object_id = object_id - self.object_type = object_type - self.operation_description = operation_description - self.operation_id = operation_id - self.operation_spec_json = operation_spec_json - self.operation_type = operation_type - self.reason_code = reason_code - self.reason_message = reason_message - self.status = status + self.submitted = submitted + self.used = used def validate(self): - pass + if self.submitted: + self.submitted.validate() + if self.used: + self.used.validate() def to_map(self): _map = super().to_map() @@ -2686,70 +3389,71 @@ def to_map(self): return _map result = dict() - if self.creator_id is not None: - result['CreatorId'] = self.creator_id - if self.gmt_created_time is not None: - result['GmtCreatedTime'] = self.gmt_created_time - if self.gmt_end_time is not None: - result['GmtEndTime'] = self.gmt_end_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.gmt_start_time is not None: - result['GmtStartTime'] = self.gmt_start_time - 
if self.object_id is not None: - result['ObjectId'] = self.object_id - if self.object_type is not None: - result['ObjectType'] = self.object_type - if self.operation_description is not None: - result['OperationDescription'] = self.operation_description - if self.operation_id is not None: - result['OperationId'] = self.operation_id - if self.operation_spec_json is not None: - result['OperationSpecJson'] = self.operation_spec_json - if self.operation_type is not None: - result['OperationType'] = self.operation_type - if self.reason_code is not None: - result['ReasonCode'] = self.reason_code - if self.reason_message is not None: - result['ReasonMessage'] = self.reason_message - if self.status is not None: - result['Status'] = self.status + if self.submitted is not None: + result['Submitted'] = self.submitted.to_map() + if self.used is not None: + result['Used'] = self.used.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CreatorId') is not None: - self.creator_id = m.get('CreatorId') - if m.get('GmtCreatedTime') is not None: - self.gmt_created_time = m.get('GmtCreatedTime') - if m.get('GmtEndTime') is not None: - self.gmt_end_time = m.get('GmtEndTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('GmtStartTime') is not None: - self.gmt_start_time = m.get('GmtStartTime') - if m.get('ObjectId') is not None: - self.object_id = m.get('ObjectId') - if m.get('ObjectType') is not None: - self.object_type = m.get('ObjectType') - if m.get('OperationDescription') is not None: - self.operation_description = m.get('OperationDescription') - if m.get('OperationId') is not None: - self.operation_id = m.get('OperationId') - if m.get('OperationSpecJson') is not None: - self.operation_spec_json = m.get('OperationSpecJson') - if m.get('OperationType') is not None: - self.operation_type = m.get('OperationType') - if m.get('ReasonCode') is not None: - self.reason_code = m.get('ReasonCode') - 
if m.get('ReasonMessage') is not None: - self.reason_message = m.get('ReasonMessage') - if m.get('Status') is not None: - self.status = m.get('Status') + if m.get('Submitted') is not None: + temp_model = ResourceAmount() + self.submitted = temp_model.from_map(m['Submitted']) + if m.get('Used') is not None: + temp_model = ResourceAmount() + self.used = temp_model.from_map(m['Used']) return self -class UserViewMetric(TeaModel): +class QuotaUser(TeaModel): + def __init__( + self, + resources: QuotaUserResources = None, + user_id: str = None, + username: str = None, + workload_count: int = None, + ): + self.resources = resources + self.user_id = user_id + self.username = username + self.workload_count = workload_count + + def validate(self): + if self.resources: + self.resources.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.resources is not None: + result['Resources'] = self.resources.to_map() + if self.user_id is not None: + result['UserId'] = self.user_id + if self.username is not None: + result['Username'] = self.username + if self.workload_count is not None: + result['WorkloadCount'] = self.workload_count + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Resources') is not None: + temp_model = QuotaUserResources() + self.resources = temp_model.from_map(m['Resources']) + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('Username') is not None: + self.username = m.get('Username') + if m.get('WorkloadCount') is not None: + self.workload_count = m.get('WorkloadCount') + return self + + +class QuotaUserViewMetric(TeaModel): def __init__( self, cpunode_number: int = None, @@ -2770,7 +3474,6 @@ def __init__( request_cpu: int = None, request_gpu: int = None, request_memory: int = None, - resource_group_id: str = None, total_cpu: int = None, total_gpu: int = None, total_memory: int = None, @@ -2794,7 +3497,6 @@ def __init__( 
self.request_cpu = request_cpu self.request_gpu = request_gpu self.request_memory = request_memory - self.resource_group_id = resource_group_id self.total_cpu = total_cpu self.total_gpu = total_gpu self.total_memory = total_memory @@ -2845,8 +3547,6 @@ def to_map(self): result['RequestGPU'] = self.request_gpu if self.request_memory is not None: result['RequestMemory'] = self.request_memory - if self.resource_group_id is not None: - result['ResourceGroupId'] = self.resource_group_id if self.total_cpu is not None: result['TotalCPU'] = self.total_cpu if self.total_gpu is not None: @@ -2895,8 +3595,6 @@ def from_map(self, m: dict = None): self.request_gpu = m.get('RequestGPU') if m.get('RequestMemory') is not None: self.request_memory = m.get('RequestMemory') - if m.get('ResourceGroupId') is not None: - self.resource_group_id = m.get('ResourceGroupId') if m.get('TotalCPU') is not None: self.total_cpu = m.get('TotalCPU') if m.get('TotalGPU') is not None: @@ -2908,21 +3606,30 @@ def from_map(self, m: dict = None): return self -class CreateAI4DDefaultBucketResponseBody(TeaModel): +class ResourceGroup(TeaModel): def __init__( self, - extranet_endpoint: str = None, - intranet_endpoint: str = None, + creator_id: str = None, + gmt_created_time: str = None, + gmt_modified_time: str = None, name: str = None, - request_id: str = None, + node_count: int = None, + resource_group_id: str = None, + user_vpc: UserVpc = None, + workspace_id: str = None, ): - self.extranet_endpoint = extranet_endpoint - self.intranet_endpoint = intranet_endpoint + self.creator_id = creator_id + self.gmt_created_time = gmt_created_time + self.gmt_modified_time = gmt_modified_time self.name = name - self.request_id = request_id + self.node_count = node_count + self.resource_group_id = resource_group_id + self.user_vpc = user_vpc + self.workspace_id = workspace_id def validate(self): - pass + if self.user_vpc: + self.user_vpc.validate() def to_map(self): _map = super().to_map() @@ -2930,46 +3637,62 @@ def 
to_map(self): return _map result = dict() - if self.extranet_endpoint is not None: - result['ExtranetEndpoint'] = self.extranet_endpoint - if self.intranet_endpoint is not None: - result['IntranetEndpoint'] = self.intranet_endpoint + if self.creator_id is not None: + result['CreatorID'] = self.creator_id + if self.gmt_created_time is not None: + result['GmtCreatedTime'] = self.gmt_created_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time if self.name is not None: result['Name'] = self.name - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.node_count is not None: + result['NodeCount'] = self.node_count + if self.resource_group_id is not None: + result['ResourceGroupID'] = self.resource_group_id + if self.user_vpc is not None: + result['UserVpc'] = self.user_vpc.to_map() + if self.workspace_id is not None: + result['WorkspaceID'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ExtranetEndpoint') is not None: - self.extranet_endpoint = m.get('ExtranetEndpoint') - if m.get('IntranetEndpoint') is not None: - self.intranet_endpoint = m.get('IntranetEndpoint') + if m.get('CreatorID') is not None: + self.creator_id = m.get('CreatorID') + if m.get('GmtCreatedTime') is not None: + self.gmt_created_time = m.get('GmtCreatedTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') if m.get('Name') is not None: self.name = m.get('Name') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('NodeCount') is not None: + self.node_count = m.get('NodeCount') + if m.get('ResourceGroupID') is not None: + self.resource_group_id = m.get('ResourceGroupID') + if m.get('UserVpc') is not None: + temp_model = UserVpc() + self.user_vpc = temp_model.from_map(m['UserVpc']) + if m.get('WorkspaceID') is not None: + self.workspace_id = m.get('WorkspaceID') return self -class 
CreateAI4DDefaultBucketResponse(TeaModel): +class ResourceGroupMetric(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: CreateAI4DDefaultBucketResponseBody = None, + gpu_type: str = None, + metrics: List[Metric] = None, + resource_group_id: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.gpu_type = gpu_type + self.metrics = metrics + self.resource_group_id = resource_group_id def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.metrics: + for k in self.metrics: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -2977,36 +3700,62 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.gpu_type is not None: + result['GpuType'] = self.gpu_type + result['Metrics'] = [] + if self.metrics is not None: + for k in self.metrics: + result['Metrics'].append(k.to_map() if k else None) + if self.resource_group_id is not None: + result['ResourceGroupID'] = self.resource_group_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = CreateAI4DDefaultBucketResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('GpuType') is not None: + self.gpu_type = m.get('GpuType') + self.metrics = [] + if m.get('Metrics') is not None: + for k in m.get('Metrics'): + temp_model = Metric() + self.metrics.append(temp_model.from_map(k)) + if m.get('ResourceGroupID') is not None: + 
self.resource_group_id = m.get('ResourceGroupID') return self -class CreateAI4DSerivceRequest(TeaModel): +class ResourceOperation(TeaModel): def __init__( self, - inference_spec: Dict[str, Any] = None, - service_type: str = None, - workspace_id: str = None, + creator_id: str = None, + gmt_created_time: str = None, + gmt_end_time: str = None, + gmt_modified_time: str = None, + gmt_start_time: str = None, + object_id: str = None, + object_type: str = None, + operation_description: str = None, + operation_id: str = None, + operation_spec_json: str = None, + operation_type: str = None, + reason_code: str = None, + reason_message: str = None, + status: str = None, ): - self.inference_spec = inference_spec - self.service_type = service_type - self.workspace_id = workspace_id + self.creator_id = creator_id + self.gmt_created_time = gmt_created_time + self.gmt_end_time = gmt_end_time + self.gmt_modified_time = gmt_modified_time + self.gmt_start_time = gmt_start_time + self.object_id = object_id + self.object_type = object_type + self.operation_description = operation_description + self.operation_id = operation_id + self.operation_spec_json = operation_spec_json + self.operation_type = operation_type + self.reason_code = reason_code + self.reason_message = reason_message + self.status = status def validate(self): pass @@ -3017,114 +3766,81 @@ def to_map(self): return _map result = dict() - if self.inference_spec is not None: - result['InferenceSpec'] = self.inference_spec - if self.service_type is not None: - result['ServiceType'] = self.service_type - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('InferenceSpec') is not None: - self.inference_spec = m.get('InferenceSpec') - if m.get('ServiceType') is not None: - self.service_type = m.get('ServiceType') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - return self - - -class 
CreateAI4DSerivceResponseBody(TeaModel): - def __init__( - self, - request_id: str = None, - service_name: str = None, - ): - self.request_id = request_id - self.service_name = service_name - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.service_name is not None: - result['ServiceName'] = self.service_name - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('ServiceName') is not None: - self.service_name = m.get('ServiceName') - return self - - -class CreateAI4DSerivceResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: CreateAI4DSerivceResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.creator_id is not None: + result['CreatorId'] = self.creator_id + if self.gmt_created_time is not None: + result['GmtCreatedTime'] = self.gmt_created_time + if self.gmt_end_time is not None: + result['GmtEndTime'] = self.gmt_end_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.gmt_start_time is not None: + result['GmtStartTime'] = self.gmt_start_time + if self.object_id is not None: + result['ObjectId'] = 
self.object_id + if self.object_type is not None: + result['ObjectType'] = self.object_type + if self.operation_description is not None: + result['OperationDescription'] = self.operation_description + if self.operation_id is not None: + result['OperationId'] = self.operation_id + if self.operation_spec_json is not None: + result['OperationSpecJson'] = self.operation_spec_json + if self.operation_type is not None: + result['OperationType'] = self.operation_type + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.status is not None: + result['Status'] = self.status return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = CreateAI4DSerivceResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('CreatorId') is not None: + self.creator_id = m.get('CreatorId') + if m.get('GmtCreatedTime') is not None: + self.gmt_created_time = m.get('GmtCreatedTime') + if m.get('GmtEndTime') is not None: + self.gmt_end_time = m.get('GmtEndTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('GmtStartTime') is not None: + self.gmt_start_time = m.get('GmtStartTime') + if m.get('ObjectId') is not None: + self.object_id = m.get('ObjectId') + if m.get('ObjectType') is not None: + self.object_type = m.get('ObjectType') + if m.get('OperationDescription') is not None: + self.operation_description = m.get('OperationDescription') + if m.get('OperationId') is not None: + self.operation_id = m.get('OperationId') + if m.get('OperationSpecJson') is not None: + self.operation_spec_json = m.get('OperationSpecJson') + if m.get('OperationType') is not None: + self.operation_type = m.get('OperationType') + if 
m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('Status') is not None: + self.status = m.get('Status') return self -class CreateAlgorithmRequest(TeaModel): +class SpotPriceItem(TeaModel): def __init__( self, - algorithm_description: str = None, - algorithm_name: str = None, - display_name: str = None, - workspace_id: str = None, + instance_type: str = None, + spot_discount: float = None, + timestamp: str = None, + zone_id: str = None, ): - self.algorithm_description = algorithm_description - self.algorithm_name = algorithm_name - self.display_name = display_name - self.workspace_id = workspace_id + self.instance_type = instance_type + self.spot_discount = spot_discount + self.timestamp = timestamp + self.zone_id = zone_id def validate(self): pass @@ -3135,37 +3851,39 @@ def to_map(self): return _map result = dict() - if self.algorithm_description is not None: - result['AlgorithmDescription'] = self.algorithm_description - if self.algorithm_name is not None: - result['AlgorithmName'] = self.algorithm_name - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.instance_type is not None: + result['InstanceType'] = self.instance_type + if self.spot_discount is not None: + result['SpotDiscount'] = self.spot_discount + if self.timestamp is not None: + result['Timestamp'] = self.timestamp + if self.zone_id is not None: + result['ZoneId'] = self.zone_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmDescription') is not None: - self.algorithm_description = m.get('AlgorithmDescription') - if m.get('AlgorithmName') is not None: - self.algorithm_name = m.get('AlgorithmName') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('WorkspaceId') is not None: - 
self.workspace_id = m.get('WorkspaceId') + if m.get('InstanceType') is not None: + self.instance_type = m.get('InstanceType') + if m.get('SpotDiscount') is not None: + self.spot_discount = m.get('SpotDiscount') + if m.get('Timestamp') is not None: + self.timestamp = m.get('Timestamp') + if m.get('ZoneId') is not None: + self.zone_id = m.get('ZoneId') return self -class CreateAlgorithmResponseBody(TeaModel): +class SpotStockPreview(TeaModel): def __init__( self, - algorithm_id: str = None, - request_id: str = None, + instance_type: str = None, + spot_discount: float = None, + stock_status: str = None, ): - self.algorithm_id = algorithm_id - self.request_id = request_id + self.instance_type = instance_type + self.spot_discount = spot_discount + self.stock_status = stock_status def validate(self): pass @@ -3176,38 +3894,36 @@ def to_map(self): return _map result = dict() - if self.algorithm_id is not None: - result['AlgorithmId'] = self.algorithm_id - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.instance_type is not None: + result['InstanceType'] = self.instance_type + if self.spot_discount is not None: + result['SpotDiscount'] = self.spot_discount + if self.stock_status is not None: + result['StockStatus'] = self.stock_status return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmId') is not None: - self.algorithm_id = m.get('AlgorithmId') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('InstanceType') is not None: + self.instance_type = m.get('InstanceType') + if m.get('SpotDiscount') is not None: + self.spot_discount = m.get('SpotDiscount') + if m.get('StockStatus') is not None: + self.stock_status = m.get('StockStatus') return self -class CreateAlgorithmResponse(TeaModel): +class TimeRangeFilter(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: CreateAlgorithmResponseBody = None, + end_time: str = None, + 
start_time: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.end_time = end_time + self.start_time = start_time def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -3215,36 +3931,74 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.start_time is not None: + result['StartTime'] = self.start_time return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = CreateAlgorithmResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') return self -class CreateAlgorithmVersionRequest(TeaModel): +class UserViewMetric(TeaModel): def __init__( self, - algorithm_spec: AlgorithmSpec = None, + cpunode_number: int = None, + cpuusage_rate: str = None, + cpu_job_names: List[str] = None, + cpu_node_names: List[str] = None, + disk_read_rate: str = None, + disk_write_rate: str = None, + gpunode_number: int = None, + gpuusage_rate: str = None, + gpu_job_names: List[str] = None, + gpu_node_names: List[str] = None, + job_type: str = None, + memory_usage_rate: str = None, + network_input_rate: str = None, + network_output_rate: str = None, + node_names: List[str] = None, + request_cpu: int = None, + request_gpu: 
int = None, + request_memory: int = None, + resource_group_id: str = None, + total_cpu: int = None, + total_gpu: int = None, + total_memory: int = None, + user_id: str = None, ): - self.algorithm_spec = algorithm_spec + self.cpunode_number = cpunode_number + self.cpuusage_rate = cpuusage_rate + self.cpu_job_names = cpu_job_names + self.cpu_node_names = cpu_node_names + self.disk_read_rate = disk_read_rate + self.disk_write_rate = disk_write_rate + self.gpunode_number = gpunode_number + self.gpuusage_rate = gpuusage_rate + self.gpu_job_names = gpu_job_names + self.gpu_node_names = gpu_node_names + self.job_type = job_type + self.memory_usage_rate = memory_usage_rate + self.network_input_rate = network_input_rate + self.network_output_rate = network_output_rate + self.node_names = node_names + self.request_cpu = request_cpu + self.request_gpu = request_gpu + self.request_memory = request_memory + self.resource_group_id = resource_group_id + self.total_cpu = total_cpu + self.total_gpu = total_gpu + self.total_memory = total_memory + self.user_id = user_id def validate(self): - if self.algorithm_spec: - self.algorithm_spec.validate() + pass def to_map(self): _map = super().to_map() @@ -3252,24 +4006,111 @@ def to_map(self): return _map result = dict() - if self.algorithm_spec is not None: - result['AlgorithmSpec'] = self.algorithm_spec.to_map() - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('AlgorithmSpec') is not None: - temp_model = AlgorithmSpec() - self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec']) - return self + if self.cpunode_number is not None: + result['CPUNodeNumber'] = self.cpunode_number + if self.cpuusage_rate is not None: + result['CPUUsageRate'] = self.cpuusage_rate + if self.cpu_job_names is not None: + result['CpuJobNames'] = self.cpu_job_names + if self.cpu_node_names is not None: + result['CpuNodeNames'] = self.cpu_node_names + if self.disk_read_rate is not None: + result['DiskReadRate'] = 
self.disk_read_rate + if self.disk_write_rate is not None: + result['DiskWriteRate'] = self.disk_write_rate + if self.gpunode_number is not None: + result['GPUNodeNumber'] = self.gpunode_number + if self.gpuusage_rate is not None: + result['GPUUsageRate'] = self.gpuusage_rate + if self.gpu_job_names is not None: + result['GpuJobNames'] = self.gpu_job_names + if self.gpu_node_names is not None: + result['GpuNodeNames'] = self.gpu_node_names + if self.job_type is not None: + result['JobType'] = self.job_type + if self.memory_usage_rate is not None: + result['MemoryUsageRate'] = self.memory_usage_rate + if self.network_input_rate is not None: + result['NetworkInputRate'] = self.network_input_rate + if self.network_output_rate is not None: + result['NetworkOutputRate'] = self.network_output_rate + if self.node_names is not None: + result['NodeNames'] = self.node_names + if self.request_cpu is not None: + result['RequestCPU'] = self.request_cpu + if self.request_gpu is not None: + result['RequestGPU'] = self.request_gpu + if self.request_memory is not None: + result['RequestMemory'] = self.request_memory + if self.resource_group_id is not None: + result['ResourceGroupId'] = self.resource_group_id + if self.total_cpu is not None: + result['TotalCPU'] = self.total_cpu + if self.total_gpu is not None: + result['TotalGPU'] = self.total_gpu + if self.total_memory is not None: + result['TotalMemory'] = self.total_memory + if self.user_id is not None: + result['UserId'] = self.user_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CPUNodeNumber') is not None: + self.cpunode_number = m.get('CPUNodeNumber') + if m.get('CPUUsageRate') is not None: + self.cpuusage_rate = m.get('CPUUsageRate') + if m.get('CpuJobNames') is not None: + self.cpu_job_names = m.get('CpuJobNames') + if m.get('CpuNodeNames') is not None: + self.cpu_node_names = m.get('CpuNodeNames') + if m.get('DiskReadRate') is not None: + self.disk_read_rate = m.get('DiskReadRate') 
+ if m.get('DiskWriteRate') is not None: + self.disk_write_rate = m.get('DiskWriteRate') + if m.get('GPUNodeNumber') is not None: + self.gpunode_number = m.get('GPUNodeNumber') + if m.get('GPUUsageRate') is not None: + self.gpuusage_rate = m.get('GPUUsageRate') + if m.get('GpuJobNames') is not None: + self.gpu_job_names = m.get('GpuJobNames') + if m.get('GpuNodeNames') is not None: + self.gpu_node_names = m.get('GpuNodeNames') + if m.get('JobType') is not None: + self.job_type = m.get('JobType') + if m.get('MemoryUsageRate') is not None: + self.memory_usage_rate = m.get('MemoryUsageRate') + if m.get('NetworkInputRate') is not None: + self.network_input_rate = m.get('NetworkInputRate') + if m.get('NetworkOutputRate') is not None: + self.network_output_rate = m.get('NetworkOutputRate') + if m.get('NodeNames') is not None: + self.node_names = m.get('NodeNames') + if m.get('RequestCPU') is not None: + self.request_cpu = m.get('RequestCPU') + if m.get('RequestGPU') is not None: + self.request_gpu = m.get('RequestGPU') + if m.get('RequestMemory') is not None: + self.request_memory = m.get('RequestMemory') + if m.get('ResourceGroupId') is not None: + self.resource_group_id = m.get('ResourceGroupId') + if m.get('TotalCPU') is not None: + self.total_cpu = m.get('TotalCPU') + if m.get('TotalGPU') is not None: + self.total_gpu = m.get('TotalGPU') + if m.get('TotalMemory') is not None: + self.total_memory = m.get('TotalMemory') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + return self -class CreateAlgorithmVersionShrinkRequest(TeaModel): +class CheckInstanceWebTerminalRequest(TeaModel): def __init__( self, - algorithm_spec_shrink: str = None, + check_info: str = None, ): - self.algorithm_spec_shrink = algorithm_spec_shrink + self.check_info = check_info def validate(self): pass @@ -3280,25 +4121,23 @@ def to_map(self): return _map result = dict() - if self.algorithm_spec_shrink is not None: - result['AlgorithmSpec'] = self.algorithm_spec_shrink + if 
self.check_info is not None: + result['CheckInfo'] = self.check_info return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmSpec') is not None: - self.algorithm_spec_shrink = m.get('AlgorithmSpec') + if m.get('CheckInfo') is not None: + self.check_info = m.get('CheckInfo') return self -class CreateAlgorithmVersionResponseBody(TeaModel): +class CheckInstanceWebTerminalResponseBody(TeaModel): def __init__( self, - algorithm_id: str = None, - algorithm_version: str = None, + request_id: str = None, ): - self.algorithm_id = algorithm_id - self.algorithm_version = algorithm_version + self.request_id = request_id def validate(self): pass @@ -3309,36 +4148,29 @@ def to_map(self): return _map result = dict() - if self.algorithm_id is not None: - result['AlgorithmId'] = self.algorithm_id - if self.algorithm_version is not None: - result['AlgorithmVersion'] = self.algorithm_version + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmId') is not None: - self.algorithm_id = m.get('AlgorithmId') - if m.get('AlgorithmVersion') is not None: - self.algorithm_version = m.get('AlgorithmVersion') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') return self -class CreateAlgorithmVersionResponse(TeaModel): +class CheckInstanceWebTerminalResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateAlgorithmVersionResponseBody = None, + body: CheckInstanceWebTerminalResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3363,77 +4195,22 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code 
= m.get('statusCode') if m.get('body') is not None: - temp_model = CreateAlgorithmVersionResponseBody() + temp_model = CheckInstanceWebTerminalResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateComponentRequest(TeaModel): +class CreateAI4DDefaultBucketResponseBody(TeaModel): def __init__( self, - description: str = None, - display_name: str = None, - labels: List[Label] = None, + extranet_endpoint: str = None, + intranet_endpoint: str = None, name: str = None, - workspace_id: str = None, - ): - self.description = description - self.display_name = display_name - self.labels = labels - self.name = name - self.workspace_id = workspace_id - - def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.description is not None: - result['Description'] = self.description - if self.display_name is not None: - result['DisplayName'] = self.display_name - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.name is not None: - result['Name'] = self.name - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - return self - - -class CreateComponentResponseBody(TeaModel): - def __init__( - self, - component_id: str = None, request_id: str = None, ): - self.component_id = 
component_id + self.extranet_endpoint = extranet_endpoint + self.intranet_endpoint = intranet_endpoint + self.name = name self.request_id = request_id def validate(self): @@ -3445,36 +4222,41 @@ def to_map(self): return _map result = dict() - if self.component_id is not None: - result['ComponentId'] = self.component_id + if self.extranet_endpoint is not None: + result['ExtranetEndpoint'] = self.extranet_endpoint + if self.intranet_endpoint is not None: + result['IntranetEndpoint'] = self.intranet_endpoint + if self.name is not None: + result['Name'] = self.name if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ComponentId') is not None: - self.component_id = m.get('ComponentId') + if m.get('ExtranetEndpoint') is not None: + self.extranet_endpoint = m.get('ExtranetEndpoint') + if m.get('IntranetEndpoint') is not None: + self.intranet_endpoint = m.get('IntranetEndpoint') + if m.get('Name') is not None: + self.name = m.get('Name') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class CreateComponentResponse(TeaModel): +class CreateAI4DDefaultBucketResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateComponentResponseBody = None, + body: CreateAI4DDefaultBucketResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3499,19 +4281,23 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateComponentResponseBody() + temp_model = CreateAI4DDefaultBucketResponseBody() self.body = temp_model.from_map(m['body']) return self -class 
CreateComponentVersionRequestLabels(TeaModel): +class CreateAI4DSerivceRequest(TeaModel): def __init__( self, - key: str = None, - value: str = None, + inference_spec: Dict[str, Any] = None, + service_type: str = None, + workspace_id: str = None, ): - self.key = key - self.value = value + self.inference_spec = inference_spec + # This parameter is required. + self.service_type = service_type + # This parameter is required. + self.workspace_id = workspace_id def validate(self): pass @@ -3522,94 +4308,33 @@ def to_map(self): return _map result = dict() - if self.key is not None: - result['Key'] = self.key - if self.value is not None: - result['Value'] = self.value + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + if self.service_type is not None: + result['ServiceType'] = self.service_type + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Key') is not None: - self.key = m.get('Key') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('InferenceSpec') is not None: + self.inference_spec = m.get('InferenceSpec') + if m.get('ServiceType') is not None: + self.service_type = m.get('ServiceType') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class CreateComponentVersionRequest(TeaModel): +class CreateAI4DSerivceResponseBody(TeaModel): def __init__( self, - config_dir: Location = None, - description: str = None, - labels: List[CreateComponentVersionRequestLabels] = None, - spec: ComponentSpec = None, - version: str = None, + request_id: str = None, + service_name: str = None, ): - self.config_dir = config_dir - self.description = description - self.labels = labels - self.spec = spec - self.version = version - - def validate(self): - if self.config_dir: - self.config_dir.validate() - if self.labels: - for k in self.labels: - if k: - k.validate() - if self.spec: - 
self.spec.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.config_dir is not None: - result['ConfigDir'] = self.config_dir.to_map() - if self.description is not None: - result['Description'] = self.description - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.spec is not None: - result['Spec'] = self.spec.to_map() - if self.version is not None: - result['Version'] = self.version - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('ConfigDir') is not None: - temp_model = Location() - self.config_dir = temp_model.from_map(m['ConfigDir']) - if m.get('Description') is not None: - self.description = m.get('Description') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = CreateComponentVersionRequestLabels() - self.labels.append(temp_model.from_map(k)) - if m.get('Spec') is not None: - temp_model = ComponentSpec() - self.spec = temp_model.from_map(m['Spec']) - if m.get('Version') is not None: - self.version = m.get('Version') - return self - - -class CreateComponentVersionResponseBody(TeaModel): - def __init__( - self, - instance_job_id: str = None, - request_id: str = None, - ): - self.instance_job_id = instance_job_id self.request_id = request_id + self.service_name = service_name def validate(self): pass @@ -3620,36 +4345,33 @@ def to_map(self): return _map result = dict() - if self.instance_job_id is not None: - result['InstanceJobId'] = self.instance_job_id if self.request_id is not None: result['RequestId'] = self.request_id + if self.service_name is not None: + result['ServiceName'] = self.service_name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('InstanceJobId') is not None: - self.instance_job_id = m.get('InstanceJobId') if m.get('RequestId') is not None: self.request_id = 
m.get('RequestId') + if m.get('ServiceName') is not None: + self.service_name = m.get('ServiceName') return self -class CreateComponentVersionResponse(TeaModel): +class CreateAI4DSerivceResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateComponentVersionResponseBody = None, + body: CreateAI4DSerivceResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3674,39 +4396,26 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateComponentVersionResponseBody() + temp_model = CreateAI4DSerivceResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateQuotaRequest(TeaModel): +class CreateAlgorithmRequest(TeaModel): def __init__( self, - allocate_strategy: str = None, - description: str = None, - labels: List[Label] = None, - min: AllocateStrategySpec = None, - parent_quota_id: str = None, - quota_name: str = None, - resource_group_ids: List[str] = None, - resource_type: str = None, + algorithm_description: str = None, + algorithm_name: str = None, + display_name: str = None, + workspace_id: str = None, ): - self.allocate_strategy = allocate_strategy - self.description = description - self.labels = labels - self.min = min - self.parent_quota_id = parent_quota_id - self.quota_name = quota_name - self.resource_group_ids = resource_group_ids - self.resource_type = resource_type + self.algorithm_description = algorithm_description + self.algorithm_name = algorithm_name + self.display_name = display_name + self.workspace_id = workspace_id def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - if self.min: - 
self.min.validate() + pass def to_map(self): _map = super().to_map() @@ -3714,58 +4423,36 @@ def to_map(self): return _map result = dict() - if self.allocate_strategy is not None: - result['AllocateStrategy'] = self.allocate_strategy - if self.description is not None: - result['Description'] = self.description - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.min is not None: - result['Min'] = self.min.to_map() - if self.parent_quota_id is not None: - result['ParentQuotaId'] = self.parent_quota_id - if self.quota_name is not None: - result['QuotaName'] = self.quota_name - if self.resource_group_ids is not None: - result['ResourceGroupIds'] = self.resource_group_ids - if self.resource_type is not None: - result['ResourceType'] = self.resource_type + if self.algorithm_description is not None: + result['AlgorithmDescription'] = self.algorithm_description + if self.algorithm_name is not None: + result['AlgorithmName'] = self.algorithm_name + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AllocateStrategy') is not None: - self.allocate_strategy = m.get('AllocateStrategy') - if m.get('Description') is not None: - self.description = m.get('Description') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('Min') is not None: - temp_model = AllocateStrategySpec() - self.min = temp_model.from_map(m['Min']) - if m.get('ParentQuotaId') is not None: - self.parent_quota_id = m.get('ParentQuotaId') - if m.get('QuotaName') is not None: - self.quota_name = m.get('QuotaName') - if m.get('ResourceGroupIds') is not None: - self.resource_group_ids = m.get('ResourceGroupIds') - if m.get('ResourceType') is 
not None: - self.resource_type = m.get('ResourceType') + if m.get('AlgorithmDescription') is not None: + self.algorithm_description = m.get('AlgorithmDescription') + if m.get('AlgorithmName') is not None: + self.algorithm_name = m.get('AlgorithmName') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class CreateQuotaResponseBody(TeaModel): +class CreateAlgorithmResponseBody(TeaModel): def __init__( self, - quota_id: str = None, + algorithm_id: str = None, request_id: str = None, ): - self.quota_id = quota_id + self.algorithm_id = algorithm_id self.request_id = request_id def validate(self): @@ -3777,36 +4464,33 @@ def to_map(self): return _map result = dict() - if self.quota_id is not None: - result['QuotaId'] = self.quota_id + if self.algorithm_id is not None: + result['AlgorithmId'] = self.algorithm_id if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') + if m.get('AlgorithmId') is not None: + self.algorithm_id = m.get('AlgorithmId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class CreateQuotaResponse(TeaModel): +class CreateAlgorithmResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateQuotaResponseBody = None, + body: CreateAlgorithmResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3831,29 +4515,21 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not 
None: - temp_model = CreateQuotaResponseBody() + temp_model = CreateAlgorithmResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateResourceGroupRequest(TeaModel): +class CreateAlgorithmVersionRequest(TeaModel): def __init__( self, - computing_resource_provider: str = None, - description: str = None, - name: str = None, - resource_type: str = None, - user_vpc: UserVpc = None, + algorithm_spec: AlgorithmSpec = None, ): - self.computing_resource_provider = computing_resource_provider - self.description = description - self.name = name - self.resource_type = resource_type - self.user_vpc = user_vpc + self.algorithm_spec = algorithm_spec def validate(self): - if self.user_vpc: - self.user_vpc.validate() + if self.algorithm_spec: + self.algorithm_spec.validate() def to_map(self): _map = super().to_map() @@ -3861,42 +4537,24 @@ def to_map(self): return _map result = dict() - if self.computing_resource_provider is not None: - result['ComputingResourceProvider'] = self.computing_resource_provider - if self.description is not None: - result['Description'] = self.description - if self.name is not None: - result['Name'] = self.name - if self.resource_type is not None: - result['ResourceType'] = self.resource_type - if self.user_vpc is not None: - result['UserVpc'] = self.user_vpc.to_map() + if self.algorithm_spec is not None: + result['AlgorithmSpec'] = self.algorithm_spec.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ComputingResourceProvider') is not None: - self.computing_resource_provider = m.get('ComputingResourceProvider') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('ResourceType') is not None: - self.resource_type = m.get('ResourceType') - if m.get('UserVpc') is not None: - temp_model = UserVpc() - self.user_vpc = temp_model.from_map(m['UserVpc']) + if m.get('AlgorithmSpec') is not None: + 
temp_model = AlgorithmSpec() + self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec']) return self -class CreateResourceGroupResponseBody(TeaModel): +class CreateAlgorithmVersionShrinkRequest(TeaModel): def __init__( self, - request_id: str = None, - resource_group_id: str = None, + algorithm_spec_shrink: str = None, ): - self.request_id = request_id - self.resource_group_id = resource_group_id + self.algorithm_spec_shrink = algorithm_spec_shrink def validate(self): pass @@ -3907,36 +4565,62 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.resource_group_id is not None: - result['ResourceGroupID'] = self.resource_group_id + if self.algorithm_spec_shrink is not None: + result['AlgorithmSpec'] = self.algorithm_spec_shrink return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('ResourceGroupID') is not None: - self.resource_group_id = m.get('ResourceGroupID') + if m.get('AlgorithmSpec') is not None: + self.algorithm_spec_shrink = m.get('AlgorithmSpec') return self -class CreateResourceGroupResponse(TeaModel): +class CreateAlgorithmVersionResponseBody(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: CreateResourceGroupResponseBody = None, + algorithm_id: str = None, + algorithm_version: str = None, ): - self.headers = headers - self.status_code = status_code + self.algorithm_id = algorithm_id + self.algorithm_version = algorithm_version + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.algorithm_id is not None: + result['AlgorithmId'] = self.algorithm_id + if self.algorithm_version is not None: + result['AlgorithmVersion'] = self.algorithm_version + return result + + def from_map(self, m: dict = None): + m = m or dict() + if 
m.get('AlgorithmId') is not None: + self.algorithm_id = m.get('AlgorithmId') + if m.get('AlgorithmVersion') is not None: + self.algorithm_version = m.get('AlgorithmVersion') + return self + + +class CreateAlgorithmVersionResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateAlgorithmVersionResponseBody = None, + ): + self.headers = headers + self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -3961,20 +4645,34 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateResourceGroupResponseBody() + temp_model = CreateAlgorithmVersionResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateServiceIdentityRoleRequest(TeaModel): +class CreateComponentRequest(TeaModel): def __init__( self, - role_name: str = None, + description: str = None, + display_name: str = None, + labels: List[Label] = None, + name: str = None, + workspace_id: str = None, ): - self.role_name = role_name + # This parameter is required. + self.description = description + self.display_name = display_name + self.labels = labels + # This parameter is required. + self.name = name + # This parameter is required. 
+ self.workspace_id = workspace_id def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -3982,25 +4680,46 @@ def to_map(self): return _map result = dict() - if self.role_name is not None: - result['RoleName'] = self.role_name + if self.description is not None: + result['Description'] = self.description + if self.display_name is not None: + result['DisplayName'] = self.display_name + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RoleName') is not None: - self.role_name = m.get('RoleName') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class CreateServiceIdentityRoleResponseBody(TeaModel): +class CreateComponentResponseBody(TeaModel): def __init__( self, + component_id: str = None, request_id: str = None, - role_name: str = None, ): + self.component_id = component_id self.request_id = request_id - self.role_name = role_name def validate(self): pass @@ -4011,36 +4730,33 @@ def to_map(self): return _map result = dict() + if self.component_id is not None: + result['ComponentId'] = self.component_id if self.request_id is not None: result['RequestId'] = self.request_id - if self.role_name is not None: - result['RoleName'] = self.role_name return result def from_map(self, m: dict = 
None): m = m or dict() + if m.get('ComponentId') is not None: + self.component_id = m.get('ComponentId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('RoleName') is not None: - self.role_name = m.get('RoleName') return self -class CreateServiceIdentityRoleResponse(TeaModel): +class CreateComponentResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: CreateServiceIdentityRoleResponseBody = None, + body: CreateComponentResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4065,25 +4781,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = CreateServiceIdentityRoleResponseBody() + temp_model = CreateComponentResponseBody() self.body = temp_model.from_map(m['body']) return self -class CreateTrainingJobRequestComputeResourceInstanceSpec(TeaModel): +class CreateComponentVersionRequestLabels(TeaModel): def __init__( self, - cpu: str = None, - gpu: str = None, - gputype: str = None, - memory: str = None, - shared_memory: str = None, + key: str = None, + value: str = None, ): - self.cpu = cpu - self.gpu = gpu - self.gputype = gputype - self.memory = memory - self.shared_memory = shared_memory + self.key = key + self.value = value def validate(self): pass @@ -4094,51 +4804,45 @@ def to_map(self): return _map result = dict() - if self.cpu is not None: - result['CPU'] = self.cpu - if self.gpu is not None: - result['GPU'] = self.gpu - if self.gputype is not None: - result['GPUType'] = self.gputype - if self.memory is not None: - result['Memory'] = self.memory - if self.shared_memory is not None: - result['SharedMemory'] = 
self.shared_memory + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CPU') is not None: - self.cpu = m.get('CPU') - if m.get('GPU') is not None: - self.gpu = m.get('GPU') - if m.get('GPUType') is not None: - self.gputype = m.get('GPUType') - if m.get('Memory') is not None: - self.memory = m.get('Memory') - if m.get('SharedMemory') is not None: - self.shared_memory = m.get('SharedMemory') + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class CreateTrainingJobRequestComputeResource(TeaModel): +class CreateComponentVersionRequest(TeaModel): def __init__( self, - ecs_count: int = None, - ecs_spec: str = None, - instance_count: int = None, - instance_spec: CreateTrainingJobRequestComputeResourceInstanceSpec = None, - resource_id: str = None, + config_dir: Location = None, + description: str = None, + labels: List[CreateComponentVersionRequestLabels] = None, + spec: ComponentSpec = None, + version: str = None, ): - self.ecs_count = ecs_count - self.ecs_spec = ecs_spec - self.instance_count = instance_count - self.instance_spec = instance_spec - self.resource_id = resource_id + self.config_dir = config_dir + self.description = description + self.labels = labels + self.spec = spec + self.version = version def validate(self): - if self.instance_spec: - self.instance_spec.validate() + if self.config_dir: + self.config_dir.validate() + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.spec: + self.spec.validate() def to_map(self): _map = super().to_map() @@ -4146,42 +4850,48 @@ def to_map(self): return _map result = dict() - if self.ecs_count is not None: - result['EcsCount'] = self.ecs_count - if self.ecs_spec is not None: - result['EcsSpec'] = self.ecs_spec - if self.instance_count is not None: - result['InstanceCount'] = 
self.instance_count - if self.instance_spec is not None: - result['InstanceSpec'] = self.instance_spec.to_map() - if self.resource_id is not None: - result['ResourceId'] = self.resource_id + if self.config_dir is not None: + result['ConfigDir'] = self.config_dir.to_map() + if self.description is not None: + result['Description'] = self.description + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.spec is not None: + result['Spec'] = self.spec.to_map() + if self.version is not None: + result['Version'] = self.version return result def from_map(self, m: dict = None): m = m or dict() - if m.get('EcsCount') is not None: - self.ecs_count = m.get('EcsCount') - if m.get('EcsSpec') is not None: - self.ecs_spec = m.get('EcsSpec') - if m.get('InstanceCount') is not None: - self.instance_count = m.get('InstanceCount') - if m.get('InstanceSpec') is not None: - temp_model = CreateTrainingJobRequestComputeResourceInstanceSpec() - self.instance_spec = temp_model.from_map(m['InstanceSpec']) - if m.get('ResourceId') is not None: - self.resource_id = m.get('ResourceId') + if m.get('ConfigDir') is not None: + temp_model = Location() + self.config_dir = temp_model.from_map(m['ConfigDir']) + if m.get('Description') is not None: + self.description = m.get('Description') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = CreateComponentVersionRequestLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('Spec') is not None: + temp_model = ComponentSpec() + self.spec = temp_model.from_map(m['Spec']) + if m.get('Version') is not None: + self.version = m.get('Version') return self -class CreateTrainingJobRequestHyperParameters(TeaModel): +class CreateComponentVersionResponseBody(TeaModel): def __init__( self, - name: str = None, - value: str = None, + instance_job_id: str = None, + request_id: str = None, ): - self.name = name - self.value = value 
+ self.instance_job_id = instance_job_id + self.request_id = request_id def validate(self): pass @@ -4192,34 +4902,35 @@ def to_map(self): return _map result = dict() - if self.name is not None: - result['Name'] = self.name - if self.value is not None: - result['Value'] = self.value + if self.instance_job_id is not None: + result['InstanceJobId'] = self.instance_job_id + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('InstanceJobId') is not None: + self.instance_job_id = m.get('InstanceJobId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') return self -class CreateTrainingJobRequestInputChannels(TeaModel): +class CreateComponentVersionResponse(TeaModel): def __init__( self, - dataset_id: str = None, - input_uri: str = None, - name: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateComponentVersionResponseBody = None, ): - self.dataset_id = dataset_id - self.input_uri = input_uri - self.name = name + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - pass + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -4227,33 +4938,34 @@ def to_map(self): return _map result = dict() - if self.dataset_id is not None: - result['DatasetId'] = self.dataset_id - if self.input_uri is not None: - result['InputUri'] = self.input_uri - if self.name is not None: - result['Name'] = self.name + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DatasetId') is not None: - self.dataset_id = 
m.get('DatasetId') - if m.get('InputUri') is not None: - self.input_uri = m.get('InputUri') - if m.get('Name') is not None: - self.name = m.get('Name') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateComponentVersionResponseBody() + self.body = temp_model.from_map(m['body']) return self -class CreateTrainingJobRequestLabels(TeaModel): +class CreateInstanceWebTerminalResponseBody(TeaModel): def __init__( self, - key: str = None, - value: str = None, + request_id: str = None, + web_terminal_id: str = None, ): - self.key = key - self.value = value + self.request_id = request_id + self.web_terminal_id = web_terminal_id def validate(self): pass @@ -4264,34 +4976,35 @@ def to_map(self): return _map result = dict() - if self.key is not None: - result['Key'] = self.key - if self.value is not None: - result['Value'] = self.value + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.web_terminal_id is not None: + result['WebTerminalId'] = self.web_terminal_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Key') is not None: - self.key = m.get('Key') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('WebTerminalId') is not None: + self.web_terminal_id = m.get('WebTerminalId') return self -class CreateTrainingJobRequestOutputChannels(TeaModel): +class CreateInstanceWebTerminalResponse(TeaModel): def __init__( self, - dataset_id: str = None, - name: str = None, - output_uri: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateInstanceWebTerminalResponseBody = None, ): - self.dataset_id = dataset_id - self.name = name - self.output_uri = output_uri + self.headers = headers + self.status_code = status_code + self.body = body def 
validate(self): - pass + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -4299,34 +5012,60 @@ def to_map(self): return _map result = dict() - if self.dataset_id is not None: - result['DatasetId'] = self.dataset_id - if self.name is not None: - result['Name'] = self.name - if self.output_uri is not None: - result['OutputUri'] = self.output_uri + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DatasetId') is not None: - self.dataset_id = m.get('DatasetId') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('OutputUri') is not None: - self.output_uri = m.get('OutputUri') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateInstanceWebTerminalResponseBody() + self.body = temp_model.from_map(m['body']) return self -class CreateTrainingJobRequestScheduler(TeaModel): +class CreateQuotaRequest(TeaModel): def __init__( self, - max_running_time_in_seconds: int = None, + allocate_strategy: str = None, + description: str = None, + labels: List[Label] = None, + min: ResourceSpec = None, + parent_quota_id: str = None, + queue_strategy: str = None, + quota_config: QuotaConfig = None, + quota_name: str = None, + resource_group_ids: List[str] = None, + resource_type: str = None, ): - self.max_running_time_in_seconds = max_running_time_in_seconds + self.allocate_strategy = allocate_strategy + self.description = description + self.labels = labels + self.min = min + self.parent_quota_id = parent_quota_id + self.queue_strategy = queue_strategy + self.quota_config = quota_config + self.quota_name = quota_name + self.resource_group_ids = resource_group_ids 
+ self.resource_type = resource_type def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.min: + self.min.validate() + if self.quota_config: + self.quota_config.validate() def to_map(self): _map = super().to_map() @@ -4334,29 +5073,69 @@ def to_map(self): return _map result = dict() - if self.max_running_time_in_seconds is not None: - result['MaxRunningTimeInSeconds'] = self.max_running_time_in_seconds + if self.allocate_strategy is not None: + result['AllocateStrategy'] = self.allocate_strategy + if self.description is not None: + result['Description'] = self.description + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.min is not None: + result['Min'] = self.min.to_map() + if self.parent_quota_id is not None: + result['ParentQuotaId'] = self.parent_quota_id + if self.queue_strategy is not None: + result['QueueStrategy'] = self.queue_strategy + if self.quota_config is not None: + result['QuotaConfig'] = self.quota_config.to_map() + if self.quota_name is not None: + result['QuotaName'] = self.quota_name + if self.resource_group_ids is not None: + result['ResourceGroupIds'] = self.resource_group_ids + if self.resource_type is not None: + result['ResourceType'] = self.resource_type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('MaxRunningTimeInSeconds') is not None: - self.max_running_time_in_seconds = m.get('MaxRunningTimeInSeconds') + if m.get('AllocateStrategy') is not None: + self.allocate_strategy = m.get('AllocateStrategy') + if m.get('Description') is not None: + self.description = m.get('Description') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Min') is not None: + temp_model = ResourceSpec() + self.min = temp_model.from_map(m['Min']) + if m.get('ParentQuotaId') is not None: + 
self.parent_quota_id = m.get('ParentQuotaId') + if m.get('QueueStrategy') is not None: + self.queue_strategy = m.get('QueueStrategy') + if m.get('QuotaConfig') is not None: + temp_model = QuotaConfig() + self.quota_config = temp_model.from_map(m['QuotaConfig']) + if m.get('QuotaName') is not None: + self.quota_name = m.get('QuotaName') + if m.get('ResourceGroupIds') is not None: + self.resource_group_ids = m.get('ResourceGroupIds') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') return self -class CreateTrainingJobRequestUserVpc(TeaModel): +class CreateQuotaResponseBody(TeaModel): def __init__( self, - extended_cidrs: List[str] = None, - security_group_id: str = None, - switch_id: str = None, - vpc_id: str = None, + quota_id: str = None, + request_id: str = None, ): - self.extended_cidrs = extended_cidrs - self.security_group_id = security_group_id - self.switch_id = switch_id - self.vpc_id = vpc_id + # Quota Id + self.quota_id = quota_id + self.request_id = request_id def validate(self): pass @@ -4367,93 +5146,35 @@ def to_map(self): return _map result = dict() - if self.extended_cidrs is not None: - result['ExtendedCIDRs'] = self.extended_cidrs - if self.security_group_id is not None: - result['SecurityGroupId'] = self.security_group_id - if self.switch_id is not None: - result['SwitchId'] = self.switch_id - if self.vpc_id is not None: - result['VpcId'] = self.vpc_id + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ExtendedCIDRs') is not None: - self.extended_cidrs = m.get('ExtendedCIDRs') - if m.get('SecurityGroupId') is not None: - self.security_group_id = m.get('SecurityGroupId') - if m.get('SwitchId') is not None: - self.switch_id = m.get('SwitchId') - if m.get('VpcId') is not None: - self.vpc_id = m.get('VpcId') + if m.get('QuotaId') is not None: + 
self.quota_id = m.get('QuotaId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') return self -class CreateTrainingJobRequest(TeaModel): +class CreateQuotaResponse(TeaModel): def __init__( self, - algorithm_name: str = None, - algorithm_provider: str = None, - algorithm_spec: AlgorithmSpec = None, - algorithm_version: str = None, - code_dir: Location = None, - compute_resource: CreateTrainingJobRequestComputeResource = None, - hyper_parameters: List[CreateTrainingJobRequestHyperParameters] = None, - input_channels: List[CreateTrainingJobRequestInputChannels] = None, - labels: List[CreateTrainingJobRequestLabels] = None, - output_channels: List[CreateTrainingJobRequestOutputChannels] = None, - role_arn: str = None, - scheduler: CreateTrainingJobRequestScheduler = None, - training_job_description: str = None, - training_job_name: str = None, - user_vpc: CreateTrainingJobRequestUserVpc = None, - workspace_id: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: CreateQuotaResponseBody = None, ): - self.algorithm_name = algorithm_name - self.algorithm_provider = algorithm_provider - self.algorithm_spec = algorithm_spec - self.algorithm_version = algorithm_version - self.code_dir = code_dir - self.compute_resource = compute_resource - self.hyper_parameters = hyper_parameters - self.input_channels = input_channels - self.labels = labels - self.output_channels = output_channels - self.role_arn = role_arn - self.scheduler = scheduler - self.training_job_description = training_job_description - self.training_job_name = training_job_name - self.user_vpc = user_vpc - self.workspace_id = workspace_id + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - if self.algorithm_spec: - self.algorithm_spec.validate() - if self.code_dir: - self.code_dir.validate() - if self.compute_resource: - self.compute_resource.validate() - if self.hyper_parameters: - for k in self.hyper_parameters: 
- if k: - k.validate() - if self.input_channels: - for k in self.input_channels: - if k: - k.validate() - if self.labels: - for k in self.labels: - if k: - k.validate() - if self.output_channels: - for k in self.output_channels: - if k: - k.validate() - if self.scheduler: - self.scheduler.validate() - if self.user_vpc: - self.user_vpc.validate() + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -4461,110 +5182,34 @@ def to_map(self): return _map result = dict() - if self.algorithm_name is not None: - result['AlgorithmName'] = self.algorithm_name - if self.algorithm_provider is not None: - result['AlgorithmProvider'] = self.algorithm_provider - if self.algorithm_spec is not None: - result['AlgorithmSpec'] = self.algorithm_spec.to_map() - if self.algorithm_version is not None: - result['AlgorithmVersion'] = self.algorithm_version - if self.code_dir is not None: - result['CodeDir'] = self.code_dir.to_map() - if self.compute_resource is not None: - result['ComputeResource'] = self.compute_resource.to_map() - result['HyperParameters'] = [] - if self.hyper_parameters is not None: - for k in self.hyper_parameters: - result['HyperParameters'].append(k.to_map() if k else None) - result['InputChannels'] = [] - if self.input_channels is not None: - for k in self.input_channels: - result['InputChannels'].append(k.to_map() if k else None) - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - result['OutputChannels'] = [] - if self.output_channels is not None: - for k in self.output_channels: - result['OutputChannels'].append(k.to_map() if k else None) - if self.role_arn is not None: - result['RoleArn'] = self.role_arn - if self.scheduler is not None: - result['Scheduler'] = self.scheduler.to_map() - if self.training_job_description is not None: - result['TrainingJobDescription'] = self.training_job_description - if self.training_job_name is not None: - 
result['TrainingJobName'] = self.training_job_name - if self.user_vpc is not None: - result['UserVpc'] = self.user_vpc.to_map() - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmName') is not None: - self.algorithm_name = m.get('AlgorithmName') - if m.get('AlgorithmProvider') is not None: - self.algorithm_provider = m.get('AlgorithmProvider') - if m.get('AlgorithmSpec') is not None: - temp_model = AlgorithmSpec() - self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec']) - if m.get('AlgorithmVersion') is not None: - self.algorithm_version = m.get('AlgorithmVersion') - if m.get('CodeDir') is not None: - temp_model = Location() - self.code_dir = temp_model.from_map(m['CodeDir']) - if m.get('ComputeResource') is not None: - temp_model = CreateTrainingJobRequestComputeResource() - self.compute_resource = temp_model.from_map(m['ComputeResource']) - self.hyper_parameters = [] - if m.get('HyperParameters') is not None: - for k in m.get('HyperParameters'): - temp_model = CreateTrainingJobRequestHyperParameters() - self.hyper_parameters.append(temp_model.from_map(k)) - self.input_channels = [] - if m.get('InputChannels') is not None: - for k in m.get('InputChannels'): - temp_model = CreateTrainingJobRequestInputChannels() - self.input_channels.append(temp_model.from_map(k)) - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = CreateTrainingJobRequestLabels() - self.labels.append(temp_model.from_map(k)) - self.output_channels = [] - if m.get('OutputChannels') is not None: - for k in m.get('OutputChannels'): - temp_model = CreateTrainingJobRequestOutputChannels() - 
self.output_channels.append(temp_model.from_map(k)) - if m.get('RoleArn') is not None: - self.role_arn = m.get('RoleArn') - if m.get('Scheduler') is not None: - temp_model = CreateTrainingJobRequestScheduler() - self.scheduler = temp_model.from_map(m['Scheduler']) - if m.get('TrainingJobDescription') is not None: - self.training_job_description = m.get('TrainingJobDescription') - if m.get('TrainingJobName') is not None: - self.training_job_name = m.get('TrainingJobName') - if m.get('UserVpc') is not None: - temp_model = CreateTrainingJobRequestUserVpc() - self.user_vpc = temp_model.from_map(m['UserVpc']) - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = CreateQuotaResponseBody() + self.body = temp_model.from_map(m['body']) return self -class CreateTrainingJobResponseBody(TeaModel): +class CreateResourceGroupRequestTag(TeaModel): def __init__( self, - request_id: str = None, - training_job_id: str = None, + key: str = None, + value: str = None, ): - self.request_id = request_id - self.training_job_id = training_job_id + self.key = key + self.value = value def validate(self): pass @@ -4575,38 +5220,45 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.training_job_id is not None: - result['TrainingJobId'] = self.training_job_id + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('TrainingJobId') is not None: - self.training_job_id = m.get('TrainingJobId') + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + 
self.value = m.get('Value') return self -class CreateTrainingJobResponse(TeaModel): +class CreateResourceGroupRequest(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: CreateTrainingJobResponseBody = None, + computing_resource_provider: str = None, + description: str = None, + name: str = None, + resource_type: str = None, + tag: List[CreateResourceGroupRequestTag] = None, + user_vpc: UserVpc = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.computing_resource_provider = computing_resource_provider + self.description = description + self.name = name + self.resource_type = resource_type + self.tag = tag + self.user_vpc = user_vpc def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.tag: + for k in self.tag: + if k: + k.validate() + if self.user_vpc: + self.user_vpc.validate() def to_map(self): _map = super().to_map() @@ -4614,32 +5266,51 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.computing_resource_provider is not None: + result['ComputingResourceProvider'] = self.computing_resource_provider + if self.description is not None: + result['Description'] = self.description + if self.name is not None: + result['Name'] = self.name + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + result['Tag'] = [] + if self.tag is not None: + for k in self.tag: + result['Tag'].append(k.to_map() if k else None) + if self.user_vpc is not None: + result['UserVpc'] = self.user_vpc.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is 
not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = CreateTrainingJobResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('ComputingResourceProvider') is not None: + self.computing_resource_provider = m.get('ComputingResourceProvider') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + self.tag = [] + if m.get('Tag') is not None: + for k in m.get('Tag'): + temp_model = CreateResourceGroupRequestTag() + self.tag.append(temp_model.from_map(k)) + if m.get('UserVpc') is not None: + temp_model = UserVpc() + self.user_vpc = temp_model.from_map(m['UserVpc']) return self -class DeleteAlgorithmResponseBody(TeaModel): +class CreateResourceGroupResponseBody(TeaModel): def __init__( self, request_id: str = None, + resource_group_id: str = None, ): self.request_id = request_id + self.resource_group_id = resource_group_id def validate(self): pass @@ -4651,31 +5322,32 @@ def to_map(self): result = dict() if self.request_id is not None: - result['requestId'] = self.request_id + result['RequestId'] = self.request_id + if self.resource_group_id is not None: + result['ResourceGroupID'] = self.resource_group_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('requestId') is not None: - self.request_id = m.get('requestId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('ResourceGroupID') is not None: + self.resource_group_id = m.get('ResourceGroupID') return self -class DeleteAlgorithmResponse(TeaModel): +class CreateResourceGroupResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteAlgorithmResponseBody = None, + body: CreateResourceGroupResponseBody = 
None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4700,17 +5372,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteAlgorithmResponseBody() + temp_model = CreateResourceGroupResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteAlgorithmVersionResponseBody(TeaModel): +class CreateResourceGroupMachineGroupRequestTag(TeaModel): def __init__( self, - request_id: str = None, + key: str = None, + value: str = None, ): - self.request_id = request_id + self.key = key + self.value = value def validate(self): pass @@ -4721,34 +5395,45 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class DeleteAlgorithmVersionResponse(TeaModel): +class CreateResourceGroupMachineGroupRequest(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteAlgorithmVersionResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body + ecs_count: int = None, + ecs_spec: str = None, + name: str = None, + payment_duration: str = None, + payment_duration_unit: str = None, + payment_type: str = None, + tag: List[CreateResourceGroupMachineGroupRequestTag] = None, + ): + self.ecs_count 
= ecs_count + self.ecs_spec = ecs_spec + self.name = name + self.payment_duration = payment_duration + self.payment_duration_unit = payment_duration_unit + self.payment_type = payment_type + self.tag = tag def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.tag: + for k in self.tag: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -4756,31 +5441,53 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.ecs_count is not None: + result['EcsCount'] = self.ecs_count + if self.ecs_spec is not None: + result['EcsSpec'] = self.ecs_spec + if self.name is not None: + result['Name'] = self.name + if self.payment_duration is not None: + result['PaymentDuration'] = self.payment_duration + if self.payment_duration_unit is not None: + result['PaymentDurationUnit'] = self.payment_duration_unit + if self.payment_type is not None: + result['PaymentType'] = self.payment_type + result['Tag'] = [] + if self.tag is not None: + for k in self.tag: + result['Tag'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteAlgorithmVersionResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('EcsCount') is not None: + self.ecs_count = m.get('EcsCount') + if m.get('EcsSpec') is not None: + self.ecs_spec = m.get('EcsSpec') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('PaymentDuration') is not None: + 
self.payment_duration = m.get('PaymentDuration') + if m.get('PaymentDurationUnit') is not None: + self.payment_duration_unit = m.get('PaymentDurationUnit') + if m.get('PaymentType') is not None: + self.payment_type = m.get('PaymentType') + self.tag = [] + if m.get('Tag') is not None: + for k in m.get('Tag'): + temp_model = CreateResourceGroupMachineGroupRequestTag() + self.tag.append(temp_model.from_map(k)) return self -class DeleteComponentResponseBody(TeaModel): +class CreateResourceGroupMachineGroupResponseBody(TeaModel): def __init__( self, + machine_group_id: str = None, request_id: str = None, ): + self.machine_group_id = machine_group_id self.request_id = request_id def validate(self): @@ -4792,32 +5499,33 @@ def to_map(self): return _map result = dict() + if self.machine_group_id is not None: + result['MachineGroupID'] = self.machine_group_id if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('MachineGroupID') is not None: + self.machine_group_id = m.get('MachineGroupID') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class DeleteComponentResponse(TeaModel): +class CreateResourceGroupMachineGroupResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteComponentResponseBody = None, + body: CreateResourceGroupMachineGroupResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4842,17 +5550,46 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteComponentResponseBody() + temp_model = 
CreateResourceGroupMachineGroupResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteComponentVersionResponseBody(TeaModel): +class CreateServiceIdentityRoleRequest(TeaModel): + def __init__( + self, + role_name: str = None, + ): + self.role_name = role_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.role_name is not None: + result['RoleName'] = self.role_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RoleName') is not None: + self.role_name = m.get('RoleName') + return self + + +class CreateServiceIdentityRoleResponseBody(TeaModel): def __init__( self, request_id: str = None, + role_name: str = None, ): self.request_id = request_id + self.role_name = role_name def validate(self): pass @@ -4865,30 +5602,31 @@ def to_map(self): result = dict() if self.request_id is not None: result['RequestId'] = self.request_id + if self.role_name is not None: + result['RoleName'] = self.role_name return result def from_map(self, m: dict = None): m = m or dict() if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('RoleName') is not None: + self.role_name = m.get('RoleName') return self -class DeleteComponentVersionResponse(TeaModel): +class CreateServiceIdentityRoleResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteComponentVersionResponseBody = None, + body: CreateServiceIdentityRoleResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -4913,17 +5651,25 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if 
m.get('body') is not None: - temp_model = DeleteComponentVersionResponseBody() + temp_model = CreateServiceIdentityRoleResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteComponentVersionSnapshotResponseBody(TeaModel): +class CreateTrainingJobRequestComputeResourceInstanceSpec(TeaModel): def __init__( self, - request_id: str = None, + cpu: str = None, + gpu: str = None, + gputype: str = None, + memory: str = None, + shared_memory: str = None, ): - self.request_id = request_id + self.cpu = cpu + self.gpu = gpu + self.gputype = gputype + self.memory = memory + self.shared_memory = shared_memory def validate(self): pass @@ -4934,34 +5680,44 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.cpu is not None: + result['CPU'] = self.cpu + if self.gpu is not None: + result['GPU'] = self.gpu + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.memory is not None: + result['Memory'] = self.memory + if self.shared_memory is not None: + result['SharedMemory'] = self.shared_memory return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('CPU') is not None: + self.cpu = m.get('CPU') + if m.get('GPU') is not None: + self.gpu = m.get('GPU') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('Memory') is not None: + self.memory = m.get('Memory') + if m.get('SharedMemory') is not None: + self.shared_memory = m.get('SharedMemory') return self -class DeleteComponentVersionSnapshotResponse(TeaModel): +class CreateTrainingJobRequestComputeResourceSpotSpec(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteComponentVersionSnapshotResponseBody = None, + spot_discount_limit: float = None, + spot_strategy: str = None, ): - self.headers = headers - self.status_code = status_code - 
self.body = body + self.spot_discount_limit = spot_discount_limit + self.spot_strategy = spot_strategy def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -4969,37 +5725,45 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.spot_discount_limit is not None: + result['SpotDiscountLimit'] = self.spot_discount_limit + if self.spot_strategy is not None: + result['SpotStrategy'] = self.spot_strategy return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteComponentVersionSnapshotResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('SpotDiscountLimit') is not None: + self.spot_discount_limit = m.get('SpotDiscountLimit') + if m.get('SpotStrategy') is not None: + self.spot_strategy = m.get('SpotStrategy') return self -class DeleteMachineGroupResponseBody(TeaModel): +class CreateTrainingJobRequestComputeResource(TeaModel): def __init__( self, - machine_group_id: str = None, - request_id: str = None, + ecs_count: int = None, + ecs_spec: str = None, + instance_count: int = None, + instance_spec: CreateTrainingJobRequestComputeResourceInstanceSpec = None, + resource_id: str = None, + spot_spec: CreateTrainingJobRequestComputeResourceSpotSpec = None, + use_spot_instance: bool = None, ): - self.machine_group_id = machine_group_id - self.request_id = request_id + self.ecs_count = ecs_count + self.ecs_spec = ecs_spec + self.instance_count 
= instance_count + self.instance_spec = instance_spec + self.resource_id = resource_id + self.spot_spec = spot_spec + self.use_spot_instance = use_spot_instance def validate(self): - pass + if self.instance_spec: + self.instance_spec.validate() + if self.spot_spec: + self.spot_spec.validate() def to_map(self): _map = super().to_map() @@ -5007,38 +5771,52 @@ def to_map(self): return _map result = dict() - if self.machine_group_id is not None: - result['MachineGroupID'] = self.machine_group_id - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.ecs_count is not None: + result['EcsCount'] = self.ecs_count + if self.ecs_spec is not None: + result['EcsSpec'] = self.ecs_spec + if self.instance_count is not None: + result['InstanceCount'] = self.instance_count + if self.instance_spec is not None: + result['InstanceSpec'] = self.instance_spec.to_map() + if self.resource_id is not None: + result['ResourceId'] = self.resource_id + if self.spot_spec is not None: + result['SpotSpec'] = self.spot_spec.to_map() + if self.use_spot_instance is not None: + result['UseSpotInstance'] = self.use_spot_instance return result def from_map(self, m: dict = None): m = m or dict() - if m.get('MachineGroupID') is not None: - self.machine_group_id = m.get('MachineGroupID') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('EcsCount') is not None: + self.ecs_count = m.get('EcsCount') + if m.get('EcsSpec') is not None: + self.ecs_spec = m.get('EcsSpec') + if m.get('InstanceCount') is not None: + self.instance_count = m.get('InstanceCount') + if m.get('InstanceSpec') is not None: + temp_model = CreateTrainingJobRequestComputeResourceInstanceSpec() + self.instance_spec = temp_model.from_map(m['InstanceSpec']) + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + if m.get('SpotSpec') is not None: + temp_model = CreateTrainingJobRequestComputeResourceSpotSpec() + self.spot_spec = 
temp_model.from_map(m['SpotSpec']) + if m.get('UseSpotInstance') is not None: + self.use_spot_instance = m.get('UseSpotInstance') return self -class DeleteMachineGroupResponse(TeaModel): +class CreateTrainingJobRequestExperimentConfig(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteMachineGroupResponseBody = None, + experiment_id: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.experiment_id = experiment_id def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -5046,34 +5824,25 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteMachineGroupResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('ExperimentId') is not None: + self.experiment_id = m.get('ExperimentId') return self -class DeleteQuotaResponseBody(TeaModel): +class CreateTrainingJobRequestHyperParameters(TeaModel): def __init__( self, - quota_id: str = None, - request_id: str = None, + name: str = None, + value: str = None, ): - self.quota_id = quota_id - self.request_id = request_id + self.name = name + self.value = value def validate(self): pass @@ -5084,71 +5853,31 @@ def to_map(self): return _map result = dict() - if 
self.quota_id is not None: - result['QuotaId'] = self.quota_id - if self.request_id is not None: - result['RequestId'] = self.request_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - return self - - -class DeleteQuotaResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteQuotaResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.name is not None: + result['Name'] = self.name + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteQuotaResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class DeleteQuotaLabelsRequest(TeaModel): +class CreateTrainingJobRequestInputChannels(TeaModel): def __init__( self, - keys: str = None, + dataset_id: str = None, + input_uri: str = None, + name: str = None, ): - self.keys = keys + self.dataset_id = dataset_id + 
self.input_uri = input_uri + self.name = name def validate(self): pass @@ -5159,25 +5888,33 @@ def to_map(self): return _map result = dict() - if self.keys is not None: - result['Keys'] = self.keys + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.input_uri is not None: + result['InputUri'] = self.input_uri + if self.name is not None: + result['Name'] = self.name return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Keys') is not None: - self.keys = m.get('Keys') + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('InputUri') is not None: + self.input_uri = m.get('InputUri') + if m.get('Name') is not None: + self.name = m.get('Name') return self -class DeleteQuotaLabelsResponseBody(TeaModel): +class CreateTrainingJobRequestLabels(TeaModel): def __init__( self, - quota_id: str = None, - request_id: str = None, + key: str = None, + value: str = None, ): - self.quota_id = quota_id - self.request_id = request_id + self.key = key + self.value = value def validate(self): pass @@ -5188,38 +5925,34 @@ def to_map(self): return _map result = dict() - if self.quota_id is not None: - result['QuotaId'] = self.quota_id - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class DeleteQuotaLabelsResponse(TeaModel): +class CreateTrainingJobRequestOutputChannels(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteQuotaLabelsResponseBody = None, + dataset_id: str = 
None, + name: str = None, + output_uri: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.dataset_id = dataset_id + self.name = name + self.output_uri = output_uri def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -5227,34 +5960,31 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.name is not None: + result['Name'] = self.name + if self.output_uri is not None: + result['OutputUri'] = self.output_uri return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteQuotaLabelsResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('OutputUri') is not None: + self.output_uri = m.get('OutputUri') return self -class DeleteResourceGroupResponseBody(TeaModel): +class CreateTrainingJobRequestScheduler(TeaModel): def __init__( self, - request_id: str = None, - resource_group_id: str = None, + max_running_time_in_seconds: int = None, ): - self.request_id = request_id - self.resource_group_id = resource_group_id + self.max_running_time_in_seconds = max_running_time_in_seconds def validate(self): pass @@ -5265,38 +5995,32 @@ def to_map(self): return _map result = dict() - if 
self.request_id is not None: - result['RequestId'] = self.request_id - if self.resource_group_id is not None: - result['ResourceGroupID'] = self.resource_group_id + if self.max_running_time_in_seconds is not None: + result['MaxRunningTimeInSeconds'] = self.max_running_time_in_seconds return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('ResourceGroupID') is not None: - self.resource_group_id = m.get('ResourceGroupID') + if m.get('MaxRunningTimeInSeconds') is not None: + self.max_running_time_in_seconds = m.get('MaxRunningTimeInSeconds') return self -class DeleteResourceGroupResponse(TeaModel): +class CreateTrainingJobRequestSettings(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteResourceGroupResponseBody = None, + aimaster_type: str = None, + enable_error_monitoring_in_aimaster: bool = None, + error_monitoring_args: str = None, + priority: int = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.aimaster_type = aimaster_type + self.enable_error_monitoring_in_aimaster = enable_error_monitoring_in_aimaster + self.error_monitoring_args = error_monitoring_args + self.priority = priority def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -5304,34 +6028,43 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.aimaster_type is not None: + result['AIMasterType'] = self.aimaster_type + if self.enable_error_monitoring_in_aimaster is not None: + 
result['EnableErrorMonitoringInAIMaster'] = self.enable_error_monitoring_in_aimaster + if self.error_monitoring_args is not None: + result['ErrorMonitoringArgs'] = self.error_monitoring_args + if self.priority is not None: + result['Priority'] = self.priority return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteResourceGroupResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('AIMasterType') is not None: + self.aimaster_type = m.get('AIMasterType') + if m.get('EnableErrorMonitoringInAIMaster') is not None: + self.enable_error_monitoring_in_aimaster = m.get('EnableErrorMonitoringInAIMaster') + if m.get('ErrorMonitoringArgs') is not None: + self.error_monitoring_args = m.get('ErrorMonitoringArgs') + if m.get('Priority') is not None: + self.priority = m.get('Priority') return self -class DeleteResourceGroupMachineGroupResponseBody(TeaModel): +class CreateTrainingJobRequestUserVpc(TeaModel): def __init__( self, - machine_group_id: str = None, - request_id: str = None, + default_route: str = None, + extended_cidrs: List[str] = None, + security_group_id: str = None, + switch_id: str = None, + vpc_id: str = None, ): - self.machine_group_id = machine_group_id - self.request_id = request_id + self.default_route = default_route + self.extended_cidrs = extended_cidrs + self.security_group_id = security_group_id + self.switch_id = switch_id + self.vpc_id = vpc_id def validate(self): pass @@ -5342,38 +6075,113 @@ def to_map(self): return _map result = dict() - if self.machine_group_id is not None: - result['MachineGroupID'] = self.machine_group_id - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.default_route is not None: + result['DefaultRoute'] = self.default_route + if self.extended_cidrs is not None: + 
result['ExtendedCIDRs'] = self.extended_cidrs + if self.security_group_id is not None: + result['SecurityGroupId'] = self.security_group_id + if self.switch_id is not None: + result['SwitchId'] = self.switch_id + if self.vpc_id is not None: + result['VpcId'] = self.vpc_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('MachineGroupID') is not None: - self.machine_group_id = m.get('MachineGroupID') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('DefaultRoute') is not None: + self.default_route = m.get('DefaultRoute') + if m.get('ExtendedCIDRs') is not None: + self.extended_cidrs = m.get('ExtendedCIDRs') + if m.get('SecurityGroupId') is not None: + self.security_group_id = m.get('SecurityGroupId') + if m.get('SwitchId') is not None: + self.switch_id = m.get('SwitchId') + if m.get('VpcId') is not None: + self.vpc_id = m.get('VpcId') return self -class DeleteResourceGroupMachineGroupResponse(TeaModel): +class CreateTrainingJobRequest(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: DeleteResourceGroupMachineGroupResponseBody = None, + algorithm_name: str = None, + algorithm_provider: str = None, + algorithm_spec: AlgorithmSpec = None, + algorithm_version: str = None, + code_dir: Location = None, + compute_resource: CreateTrainingJobRequestComputeResource = None, + environments: Dict[str, str] = None, + experiment_config: CreateTrainingJobRequestExperimentConfig = None, + hyper_parameters: List[CreateTrainingJobRequestHyperParameters] = None, + input_channels: List[CreateTrainingJobRequestInputChannels] = None, + labels: List[CreateTrainingJobRequestLabels] = None, + output_channels: List[CreateTrainingJobRequestOutputChannels] = None, + python_requirements: List[str] = None, + role_arn: str = None, + scheduler: CreateTrainingJobRequestScheduler = None, + settings: CreateTrainingJobRequestSettings = None, + training_job_description: str = None, + 
training_job_name: str = None, + user_vpc: CreateTrainingJobRequestUserVpc = None, + workspace_id: str = None, + resource_type: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.algorithm_name = algorithm_name + self.algorithm_provider = algorithm_provider + self.algorithm_spec = algorithm_spec + self.algorithm_version = algorithm_version + self.code_dir = code_dir + self.compute_resource = compute_resource + self.environments = environments + self.experiment_config = experiment_config + self.hyper_parameters = hyper_parameters + self.input_channels = input_channels + self.labels = labels + self.output_channels = output_channels + self.python_requirements = python_requirements + self.role_arn = role_arn + self.scheduler = scheduler + self.settings = settings + self.training_job_description = training_job_description + # This parameter is required. + self.training_job_name = training_job_name + self.user_vpc = user_vpc + # This parameter is required. 
+ self.workspace_id = workspace_id + self.resource_type = resource_type def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.algorithm_spec: + self.algorithm_spec.validate() + if self.code_dir: + self.code_dir.validate() + if self.compute_resource: + self.compute_resource.validate() + if self.experiment_config: + self.experiment_config.validate() + if self.hyper_parameters: + for k in self.hyper_parameters: + if k: + k.validate() + if self.input_channels: + for k in self.input_channels: + if k: + k.validate() + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.output_channels: + for k in self.output_channels: + if k: + k.validate() + if self.scheduler: + self.scheduler.validate() + if self.settings: + self.settings.validate() + if self.user_vpc: + self.user_vpc.validate() def to_map(self): _map = super().to_map() @@ -5381,32 +6189,132 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.algorithm_name is not None: + result['AlgorithmName'] = self.algorithm_name + if self.algorithm_provider is not None: + result['AlgorithmProvider'] = self.algorithm_provider + if self.algorithm_spec is not None: + result['AlgorithmSpec'] = self.algorithm_spec.to_map() + if self.algorithm_version is not None: + result['AlgorithmVersion'] = self.algorithm_version + if self.code_dir is not None: + result['CodeDir'] = self.code_dir.to_map() + if self.compute_resource is not None: + result['ComputeResource'] = self.compute_resource.to_map() + if self.environments is not None: + result['Environments'] = self.environments + if self.experiment_config is not None: + result['ExperimentConfig'] 
= self.experiment_config.to_map() + result['HyperParameters'] = [] + if self.hyper_parameters is not None: + for k in self.hyper_parameters: + result['HyperParameters'].append(k.to_map() if k else None) + result['InputChannels'] = [] + if self.input_channels is not None: + for k in self.input_channels: + result['InputChannels'].append(k.to_map() if k else None) + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + result['OutputChannels'] = [] + if self.output_channels is not None: + for k in self.output_channels: + result['OutputChannels'].append(k.to_map() if k else None) + if self.python_requirements is not None: + result['PythonRequirements'] = self.python_requirements + if self.role_arn is not None: + result['RoleArn'] = self.role_arn + if self.scheduler is not None: + result['Scheduler'] = self.scheduler.to_map() + if self.settings is not None: + result['Settings'] = self.settings.to_map() + if self.training_job_description is not None: + result['TrainingJobDescription'] = self.training_job_description + if self.training_job_name is not None: + result['TrainingJobName'] = self.training_job_name + if self.user_vpc is not None: + result['UserVpc'] = self.user_vpc.to_map() + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + if self.resource_type is not None: + result['ResourceType'] = self.resource_type return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = DeleteResourceGroupMachineGroupResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('AlgorithmName') is not None: + self.algorithm_name = m.get('AlgorithmName') + if m.get('AlgorithmProvider') is not None: + self.algorithm_provider = m.get('AlgorithmProvider') + if 
m.get('AlgorithmSpec') is not None: + temp_model = AlgorithmSpec() + self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec']) + if m.get('AlgorithmVersion') is not None: + self.algorithm_version = m.get('AlgorithmVersion') + if m.get('CodeDir') is not None: + temp_model = Location() + self.code_dir = temp_model.from_map(m['CodeDir']) + if m.get('ComputeResource') is not None: + temp_model = CreateTrainingJobRequestComputeResource() + self.compute_resource = temp_model.from_map(m['ComputeResource']) + if m.get('Environments') is not None: + self.environments = m.get('Environments') + if m.get('ExperimentConfig') is not None: + temp_model = CreateTrainingJobRequestExperimentConfig() + self.experiment_config = temp_model.from_map(m['ExperimentConfig']) + self.hyper_parameters = [] + if m.get('HyperParameters') is not None: + for k in m.get('HyperParameters'): + temp_model = CreateTrainingJobRequestHyperParameters() + self.hyper_parameters.append(temp_model.from_map(k)) + self.input_channels = [] + if m.get('InputChannels') is not None: + for k in m.get('InputChannels'): + temp_model = CreateTrainingJobRequestInputChannels() + self.input_channels.append(temp_model.from_map(k)) + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = CreateTrainingJobRequestLabels() + self.labels.append(temp_model.from_map(k)) + self.output_channels = [] + if m.get('OutputChannels') is not None: + for k in m.get('OutputChannels'): + temp_model = CreateTrainingJobRequestOutputChannels() + self.output_channels.append(temp_model.from_map(k)) + if m.get('PythonRequirements') is not None: + self.python_requirements = m.get('PythonRequirements') + if m.get('RoleArn') is not None: + self.role_arn = m.get('RoleArn') + if m.get('Scheduler') is not None: + temp_model = CreateTrainingJobRequestScheduler() + self.scheduler = temp_model.from_map(m['Scheduler']) + if m.get('Settings') is not None: + temp_model = CreateTrainingJobRequestSettings() + 
self.settings = temp_model.from_map(m['Settings']) + if m.get('TrainingJobDescription') is not None: + self.training_job_description = m.get('TrainingJobDescription') + if m.get('TrainingJobName') is not None: + self.training_job_name = m.get('TrainingJobName') + if m.get('UserVpc') is not None: + temp_model = CreateTrainingJobRequestUserVpc() + self.user_vpc = temp_model.from_map(m['UserVpc']) + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') return self -class DeleteTrainingJobResponseBody(TeaModel): +class CreateTrainingJobResponseBody(TeaModel): def __init__( self, request_id: str = None, + training_job_id: str = None, ): self.request_id = request_id + self.training_job_id = training_job_id def validate(self): pass @@ -5419,30 +6327,31 @@ def to_map(self): result = dict() if self.request_id is not None: result['RequestId'] = self.request_id + if self.training_job_id is not None: + result['TrainingJobId'] = self.training_job_id return result def from_map(self, m: dict = None): m = m or dict() if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TrainingJobId') is not None: + self.training_job_id = m.get('TrainingJobId') return self -class DeleteTrainingJobResponse(TeaModel): +class CreateTrainingJobResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteTrainingJobResponseBody = None, + body: CreateTrainingJobResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5467,17 +6376,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not 
None: - temp_model = DeleteTrainingJobResponseBody() + temp_model = CreateTrainingJobResponseBody() self.body = temp_model.from_map(m['body']) return self -class DeleteTrainingJobLabelsRequest(TeaModel): +class DeleteAlgorithmResponseBody(TeaModel): def __init__( self, - keys: str = None, + request_id: str = None, ): - self.keys = keys + self.request_id = request_id def validate(self): pass @@ -5488,18 +6397,59 @@ def to_map(self): return _map result = dict() - if self.keys is not None: - result['Keys'] = self.keys + if self.request_id is not None: + result['requestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Keys') is not None: - self.keys = m.get('Keys') + if m.get('requestId') is not None: + self.request_id = m.get('requestId') return self -class DeleteTrainingJobLabelsResponseBody(TeaModel): +class DeleteAlgorithmResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteAlgorithmResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteAlgorithmResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteAlgorithmVersionResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -5526,21 +6476,18 @@ def from_map(self, m: 
dict = None): return self -class DeleteTrainingJobLabelsResponse(TeaModel): +class DeleteAlgorithmVersionResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: DeleteTrainingJobLabelsResponseBody = None, + body: DeleteAlgorithmVersionResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5565,22 +6512,16 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = DeleteTrainingJobLabelsResponseBody() + temp_model = DeleteAlgorithmVersionResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetAI4DDefaultBucketResponseBody(TeaModel): +class DeleteComponentResponseBody(TeaModel): def __init__( self, - extranet_endpoint: str = None, - intranet_endpoint: str = None, - name: str = None, request_id: str = None, ): - self.extranet_endpoint = extranet_endpoint - self.intranet_endpoint = intranet_endpoint - self.name = name self.request_id = request_id def validate(self): @@ -5592,44 +6533,29 @@ def to_map(self): return _map result = dict() - if self.extranet_endpoint is not None: - result['ExtranetEndpoint'] = self.extranet_endpoint - if self.intranet_endpoint is not None: - result['IntranetEndpoint'] = self.intranet_endpoint - if self.name is not None: - result['Name'] = self.name if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ExtranetEndpoint') is not None: - self.extranet_endpoint = m.get('ExtranetEndpoint') - if m.get('IntranetEndpoint') is not None: - self.intranet_endpoint = m.get('IntranetEndpoint') - if m.get('Name') is not None: - self.name = 
m.get('Name') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class GetAI4DDefaultBucketResponse(TeaModel): +class DeleteComponentResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetAI4DDefaultBucketResponseBody = None, + body: DeleteComponentResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5654,37 +6580,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetAI4DDefaultBucketResponseBody() + temp_model = DeleteComponentResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetAlgorithmResponseBody(TeaModel): +class DeleteComponentVersionResponseBody(TeaModel): def __init__( self, - algorithm_description: str = None, - algorithm_id: str = None, - algorithm_name: str = None, - algorithm_provider: str = None, - display_name: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, request_id: str = None, - tenant_id: str = None, - user_id: str = None, - workspace_id: str = None, ): - self.algorithm_description = algorithm_description - self.algorithm_id = algorithm_id - self.algorithm_name = algorithm_name - self.algorithm_provider = algorithm_provider - self.display_name = display_name - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time self.request_id = request_id - self.tenant_id = tenant_id - self.user_id = user_id - self.workspace_id = workspace_id def validate(self): pass @@ -5695,72 +6601,29 @@ def to_map(self): return _map result = dict() - if self.algorithm_description is not None: - result['AlgorithmDescription'] = 
self.algorithm_description - if self.algorithm_id is not None: - result['AlgorithmId'] = self.algorithm_id - if self.algorithm_name is not None: - result['AlgorithmName'] = self.algorithm_name - if self.algorithm_provider is not None: - result['AlgorithmProvider'] = self.algorithm_provider - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time if self.request_id is not None: result['RequestId'] = self.request_id - if self.tenant_id is not None: - result['TenantId'] = self.tenant_id - if self.user_id is not None: - result['UserId'] = self.user_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmDescription') is not None: - self.algorithm_description = m.get('AlgorithmDescription') - if m.get('AlgorithmId') is not None: - self.algorithm_id = m.get('AlgorithmId') - if m.get('AlgorithmName') is not None: - self.algorithm_name = m.get('AlgorithmName') - if m.get('AlgorithmProvider') is not None: - self.algorithm_provider = m.get('AlgorithmProvider') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('TenantId') is not None: - self.tenant_id = m.get('TenantId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') return self -class GetAlgorithmResponse(TeaModel): +class DeleteComponentVersionResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, 
status_code: int = None, - body: GetAlgorithmResponseBody = None, + body: DeleteComponentVersionResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5785,37 +6648,20 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetAlgorithmResponseBody() + temp_model = DeleteComponentVersionResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetAlgorithmVersionResponseBody(TeaModel): +class DeleteComponentVersionSnapshotResponseBody(TeaModel): def __init__( self, - algorithm_id: str = None, - algorithm_name: str = None, - algorithm_provider: str = None, - algorithm_spec: AlgorithmSpec = None, - algorithm_version: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - tenant_id: str = None, - user_id: str = None, + request_id: str = None, ): - self.algorithm_id = algorithm_id - self.algorithm_name = algorithm_name - self.algorithm_provider = algorithm_provider - self.algorithm_spec = algorithm_spec - self.algorithm_version = algorithm_version - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.tenant_id = tenant_id - self.user_id = user_id + self.request_id = request_id def validate(self): - if self.algorithm_spec: - self.algorithm_spec.validate() + pass def to_map(self): _map = super().to_map() @@ -5823,65 +6669,29 @@ def to_map(self): return _map result = dict() - if self.algorithm_id is not None: - result['AlgorithmId'] = self.algorithm_id - if self.algorithm_name is not None: - result['AlgorithmName'] = self.algorithm_name - if self.algorithm_provider is not None: - result['AlgorithmProvider'] = self.algorithm_provider - if 
self.algorithm_spec is not None: - result['AlgorithmSpec'] = self.algorithm_spec.to_map() - if self.algorithm_version is not None: - result['AlgorithmVersion'] = self.algorithm_version - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.tenant_id is not None: - result['TenantId'] = self.tenant_id - if self.user_id is not None: - result['UserId'] = self.user_id + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmId') is not None: - self.algorithm_id = m.get('AlgorithmId') - if m.get('AlgorithmName') is not None: - self.algorithm_name = m.get('AlgorithmName') - if m.get('AlgorithmProvider') is not None: - self.algorithm_provider = m.get('AlgorithmProvider') - if m.get('AlgorithmSpec') is not None: - temp_model = AlgorithmSpec() - self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec']) - if m.get('AlgorithmVersion') is not None: - self.algorithm_version = m.get('AlgorithmVersion') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('TenantId') is not None: - self.tenant_id = m.get('TenantId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') return self -class GetAlgorithmVersionResponse(TeaModel): +class DeleteComponentVersionSnapshotResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetAlgorithmVersionResponseBody = None, + body: DeleteComponentVersionSnapshotResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - 
self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -5906,23 +6716,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetAlgorithmVersionResponseBody() + temp_model = DeleteComponentVersionSnapshotResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetComponentResponseBodyVersions(TeaModel): +class DeleteMachineGroupResponseBody(TeaModel): def __init__( self, - gmt_create_time: str = None, - snapshot_id: str = None, - status: str = None, - version: str = None, + machine_group_id: str = None, + request_id: str = None, ): - self.gmt_create_time = gmt_create_time - self.snapshot_id = snapshot_id - self.status = status - self.version = version + self.machine_group_id = machine_group_id + self.request_id = request_id def validate(self): pass @@ -5933,69 +6739,35 @@ def to_map(self): return _map result = dict() - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.snapshot_id is not None: - result['SnapshotId'] = self.snapshot_id - if self.status is not None: - result['Status'] = self.status - if self.version is not None: - result['Version'] = self.version + if self.machine_group_id is not None: + result['MachineGroupID'] = self.machine_group_id + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('SnapshotId') is not None: - self.snapshot_id = m.get('SnapshotId') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('Version') is not None: - self.version = m.get('Version') + if m.get('MachineGroupID') is not None: + self.machine_group_id = m.get('MachineGroupID') + if m.get('RequestId') is not None: + 
self.request_id = m.get('RequestId') return self -class GetComponentResponseBody(TeaModel): +class DeleteMachineGroupResponse(TeaModel): def __init__( self, - component_id: str = None, - description: str = None, - display_name: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - labels: List[Label] = None, - name: str = None, - provider: str = None, - request_id: str = None, - tenant_id: str = None, - user_id: str = None, - versions: List[GetComponentResponseBodyVersions] = None, - workspace_id: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: DeleteMachineGroupResponseBody = None, ): - self.component_id = component_id - self.description = description - self.display_name = display_name - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.labels = labels - self.name = name - self.provider = provider - self.request_id = request_id - self.tenant_id = tenant_id - self.user_id = user_id - self.versions = versions - self.workspace_id = workspace_id + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - if self.versions: - for k in self.versions: - if k: - k.validate() + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -6003,90 +6775,72 @@ def to_map(self): return _map result = dict() - if self.component_id is not None: - result['ComponentId'] = self.component_id - if self.description is not None: - result['Description'] = self.description - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if 
self.name is not None: - result['Name'] = self.name - if self.provider is not None: - result['Provider'] = self.provider - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.tenant_id is not None: - result['TenantId'] = self.tenant_id - if self.user_id is not None: - result['UserId'] = self.user_id - result['Versions'] = [] - if self.versions is not None: - for k in self.versions: - result['Versions'].append(k.to_map() if k else None) - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ComponentId') is not None: - self.component_id = m.get('ComponentId') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Provider') is not None: - self.provider = m.get('Provider') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = DeleteMachineGroupResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class DeleteQuotaResponseBody(TeaModel): + def __init__( + self, + quota_id: str = None, + request_id: str = None, + ): + # Quota Id + self.quota_id = quota_id + 
self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('TenantId') is not None: - self.tenant_id = m.get('TenantId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - self.versions = [] - if m.get('Versions') is not None: - for k in m.get('Versions'): - temp_model = GetComponentResponseBodyVersions() - self.versions.append(temp_model.from_map(k)) - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') return self -class GetComponentResponse(TeaModel): +class DeleteQuotaResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetComponentResponseBody = None, + body: DeleteQuotaResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6111,49 +6865,49 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetComponentResponseBody() + temp_model = DeleteQuotaResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetComponentVersionResponseBody(TeaModel): +class DeleteQuotaLabelsRequest(TeaModel): def __init__( self, - description: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - labels: List[Label] = None, - 
name: str = None, - provider: str = None, + keys: str = None, + ): + self.keys = keys + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.keys is not None: + result['Keys'] = self.keys + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Keys') is not None: + self.keys = m.get('Keys') + return self + + +class DeleteQuotaLabelsResponseBody(TeaModel): + def __init__( + self, + quota_id: str = None, request_id: str = None, - snapshot_id: str = None, - spec: ComponentSpec = None, - tenant_id: str = None, - user_id: str = None, - version: str = None, - workspace_id: str = None, ): - self.description = description - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.labels = labels - self.name = name - self.provider = provider + self.quota_id = quota_id self.request_id = request_id - self.snapshot_id = snapshot_id - self.spec = spec - self.tenant_id = tenant_id - self.user_id = user_id - self.version = version - self.workspace_id = workspace_id def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - if self.spec: - self.spec.validate() + pass def to_map(self): _map = super().to_map() @@ -6161,86 +6915,33 @@ def to_map(self): return _map result = dict() - if self.description is not None: - result['Description'] = self.description - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.name is not None: - result['Name'] = self.name - if self.provider is not None: - result['Provider'] = self.provider + if self.quota_id is not None: + result['QuotaId'] = self.quota_id if self.request_id is not None: result['RequestId'] 
= self.request_id - if self.snapshot_id is not None: - result['SnapshotId'] = self.snapshot_id - if self.spec is not None: - result['Spec'] = self.spec.to_map() - if self.tenant_id is not None: - result['TenantId'] = self.tenant_id - if self.user_id is not None: - result['UserId'] = self.user_id - if self.version is not None: - result['Version'] = self.version - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Provider') is not None: - self.provider = m.get('Provider') + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('SnapshotId') is not None: - self.snapshot_id = m.get('SnapshotId') - if m.get('Spec') is not None: - temp_model = ComponentSpec() - self.spec = temp_model.from_map(m['Spec']) - if m.get('TenantId') is not None: - self.tenant_id = m.get('TenantId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('Version') is not None: - self.version = m.get('Version') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') return self -class GetComponentVersionResponse(TeaModel): +class DeleteQuotaLabelsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetComponentVersionResponseBody = None, + body: DeleteQuotaLabelsResponseBody = None, ): self.headers = headers self.status_code = 
status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6265,35 +6966,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetComponentVersionResponseBody() + temp_model = DeleteQuotaLabelsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetComponentVersionSnapshotResponseBody(TeaModel): +class DeleteResourceGroupResponseBody(TeaModel): def __init__( self, - component_id: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - is_current_version: bool = None, request_id: str = None, - snapshot_id: str = None, - tenant_id: str = None, - user_id: str = None, - version: str = None, - workspace_id: str = None, + resource_group_id: str = None, ): - self.component_id = component_id - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.is_current_version = is_current_version self.request_id = request_id - self.snapshot_id = snapshot_id - self.tenant_id = tenant_id - self.user_id = user_id - self.version = version - self.workspace_id = workspace_id + self.resource_group_id = resource_group_id def validate(self): pass @@ -6304,68 +6989,33 @@ def to_map(self): return _map result = dict() - if self.component_id is not None: - result['ComponentId'] = self.component_id - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.is_current_version is not None: - result['IsCurrentVersion'] = self.is_current_version if self.request_id is not None: result['RequestId'] = self.request_id - if self.snapshot_id is not None: - result['SnapshotId'] = self.snapshot_id - if 
self.tenant_id is not None: - result['TenantId'] = self.tenant_id - if self.user_id is not None: - result['UserId'] = self.user_id - if self.version is not None: - result['Version'] = self.version - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.resource_group_id is not None: + result['ResourceGroupID'] = self.resource_group_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ComponentId') is not None: - self.component_id = m.get('ComponentId') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('IsCurrentVersion') is not None: - self.is_current_version = m.get('IsCurrentVersion') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('SnapshotId') is not None: - self.snapshot_id = m.get('SnapshotId') - if m.get('TenantId') is not None: - self.tenant_id = m.get('TenantId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('Version') is not None: - self.version = m.get('Version') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('ResourceGroupID') is not None: + self.resource_group_id = m.get('ResourceGroupID') return self -class GetComponentVersionSnapshotResponse(TeaModel): +class DeleteResourceGroupResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetComponentVersionSnapshotResponseBody = None, + body: DeleteResourceGroupResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6390,35 +7040,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is 
not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetComponentVersionSnapshotResponseBody() + temp_model = DeleteResourceGroupResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetInstanceJobResponseBody(TeaModel): +class DeleteResourceGroupMachineGroupResponseBody(TeaModel): def __init__( self, - creator: str = None, - gmt_create_time: str = None, - instance_id: str = None, - instance_job_id: str = None, - instance_job_type: str = None, - reason_code: str = None, - reason_message: str = None, + machine_group_id: str = None, request_id: str = None, - status: str = None, - workspace_id: str = None, ): - self.creator = creator - self.gmt_create_time = gmt_create_time - self.instance_id = instance_id - self.instance_job_id = instance_job_id - self.instance_job_type = instance_job_type - self.reason_code = reason_code - self.reason_message = reason_message + self.machine_group_id = machine_group_id self.request_id = request_id - self.status = status - self.workspace_id = workspace_id def validate(self): pass @@ -6429,68 +7063,33 @@ def to_map(self): return _map result = dict() - if self.creator is not None: - result['Creator'] = self.creator - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.instance_id is not None: - result['InstanceId'] = self.instance_id - if self.instance_job_id is not None: - result['InstanceJobId'] = self.instance_job_id - if self.instance_job_type is not None: - result['InstanceJobType'] = self.instance_job_type - if self.reason_code is not None: - result['ReasonCode'] = self.reason_code - if self.reason_message is not None: - result['ReasonMessage'] = self.reason_message + if self.machine_group_id is not None: + result['MachineGroupID'] = self.machine_group_id if self.request_id is not None: result['RequestId'] = self.request_id - if self.status is not None: - result['Status'] = self.status - if self.workspace_id is not None: - 
result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Creator') is not None: - self.creator = m.get('Creator') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('InstanceId') is not None: - self.instance_id = m.get('InstanceId') - if m.get('InstanceJobId') is not None: - self.instance_job_id = m.get('InstanceJobId') - if m.get('InstanceJobType') is not None: - self.instance_job_type = m.get('InstanceJobType') - if m.get('ReasonCode') is not None: - self.reason_code = m.get('ReasonCode') - if m.get('ReasonMessage') is not None: - self.reason_message = m.get('ReasonMessage') + if m.get('MachineGroupID') is not None: + self.machine_group_id = m.get('MachineGroupID') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') return self -class GetInstanceJobResponse(TeaModel): +class DeleteResourceGroupMachineGroupResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetInstanceJobResponseBody = None, + body: DeleteResourceGroupMachineGroupResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6515,94 +7114,20 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetInstanceJobResponseBody() + temp_model = DeleteResourceGroupMachineGroupResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetJobViewMetricsRequest(TeaModel): - def __init__( - self, - end_time: str = None, - 
page_number: int = None, - page_size: int = None, - sort_by: str = None, - start_time: str = None, - time_step: str = None, - workspace_id: str = None, - ): - self.end_time = end_time - self.page_number = page_number - self.page_size = page_size - self.sort_by = sort_by - self.start_time = start_time - self.time_step = time_step - self.workspace_id = workspace_id - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.end_time is not None: - result['EndTime'] = self.end_time - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.start_time is not None: - result['StartTime'] = self.start_time - if self.time_step is not None: - result['TimeStep'] = self.time_step - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('EndTime') is not None: - self.end_time = m.get('EndTime') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('StartTime') is not None: - self.start_time = m.get('StartTime') - if m.get('TimeStep') is not None: - self.time_step = m.get('TimeStep') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - return self - - -class GetJobViewMetricsResponseBody(TeaModel): +class DeleteTrainingJobResponseBody(TeaModel): def __init__( self, - job_metrics: List[JobViewMetric] = None, request_id: str = None, - summary: JobViewMetric = None, - total: int = None, ): - self.job_metrics = job_metrics self.request_id = request_id - self.summary = summary - self.total = total def validate(self): - if 
self.job_metrics: - for k in self.job_metrics: - if k: - k.validate() - if self.summary: - self.summary.validate() + pass def to_map(self): _map = super().to_map() @@ -6610,50 +7135,29 @@ def to_map(self): return _map result = dict() - result['JobMetrics'] = [] - if self.job_metrics is not None: - for k in self.job_metrics: - result['JobMetrics'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - if self.summary is not None: - result['Summary'] = self.summary.to_map() - if self.total is not None: - result['Total'] = self.total return result def from_map(self, m: dict = None): m = m or dict() - self.job_metrics = [] - if m.get('JobMetrics') is not None: - for k in m.get('JobMetrics'): - temp_model = JobViewMetric() - self.job_metrics.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Summary') is not None: - temp_model = JobViewMetric() - self.summary = temp_model.from_map(m['Summary']) - if m.get('Total') is not None: - self.total = m.get('Total') return self -class GetJobViewMetricsResponse(TeaModel): +class DeleteTrainingJobResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetJobViewMetricsResponseBody = None, + body: DeleteTrainingJobResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6678,21 +7182,18 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetJobViewMetricsResponseBody() + temp_model = DeleteTrainingJobResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetJobsStatisticsByQuotaRequest(TeaModel): 
+class DeleteTrainingJobLabelsRequest(TeaModel): def __init__( self, - end_time: str = None, - start_time: str = None, - workspace_id: str = None, + keys: str = None, ): - self.end_time = end_time - self.start_time = start_time - self.workspace_id = workspace_id + # This parameter is required. + self.keys = keys def validate(self): pass @@ -6703,35 +7204,23 @@ def to_map(self): return _map result = dict() - if self.end_time is not None: - result['EndTime'] = self.end_time - if self.start_time is not None: - result['StartTime'] = self.start_time - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.keys is not None: + result['Keys'] = self.keys return result def from_map(self, m: dict = None): m = m or dict() - if m.get('EndTime') is not None: - self.end_time = m.get('EndTime') - if m.get('StartTime') is not None: - self.start_time = m.get('StartTime') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('Keys') is not None: + self.keys = m.get('Keys') return self -class GetJobsStatisticsByQuotaResponseBody(TeaModel): +class DeleteTrainingJobLabelsResponseBody(TeaModel): def __init__( self, - quota_id: str = None, request_id: str = None, - statistics: Dict[str, Any] = None, ): - self.quota_id = quota_id self.request_id = request_id - self.statistics = statistics def validate(self): pass @@ -6742,40 +7231,29 @@ def to_map(self): return _map result = dict() - if self.quota_id is not None: - result['QuotaId'] = self.quota_id if self.request_id is not None: result['RequestId'] = self.request_id - if self.statistics is not None: - result['Statistics'] = self.statistics return result def from_map(self, m: dict = None): m = m or dict() - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Statistics') is not None: - self.statistics = m.get('Statistics') return self -class 
GetJobsStatisticsByQuotaResponse(TeaModel): +class DeleteTrainingJobLabelsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetJobsStatisticsByQuotaResponseBody = None, + body: DeleteTrainingJobLabelsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6800,58 +7278,23 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetJobsStatisticsByQuotaResponseBody() + temp_model = DeleteTrainingJobLabelsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetJobsStatisticsByResourceGroupRequest(TeaModel): +class GetAI4DDefaultBucketResponseBody(TeaModel): def __init__( self, - end_time: str = None, - start_time: str = None, - workspace_id: str = None, + extranet_endpoint: str = None, + intranet_endpoint: str = None, + name: str = None, + request_id: str = None, ): - self.end_time = end_time - self.start_time = start_time - self.workspace_id = workspace_id - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.end_time is not None: - result['EndTime'] = self.end_time - if self.start_time is not None: - result['StartTime'] = self.start_time - if self.workspace_id is not None: - result['WorkspaceID'] = self.workspace_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('EndTime') is not None: - self.end_time = m.get('EndTime') - if m.get('StartTime') is not None: - self.start_time = m.get('StartTime') - if m.get('WorkspaceID') is not None: - self.workspace_id = m.get('WorkspaceID') - return self - - -class 
GetJobsStatisticsByResourceGroupResponseBody(TeaModel): - def __init__( - self, - request_id: str = None, - statistics: Dict[str, Any] = None, - ): - self.request_id = request_id - self.statistics = statistics + self.extranet_endpoint = extranet_endpoint + self.intranet_endpoint = intranet_endpoint + self.name = name + self.request_id = request_id def validate(self): pass @@ -6862,36 +7305,41 @@ def to_map(self): return _map result = dict() + if self.extranet_endpoint is not None: + result['ExtranetEndpoint'] = self.extranet_endpoint + if self.intranet_endpoint is not None: + result['IntranetEndpoint'] = self.intranet_endpoint + if self.name is not None: + result['Name'] = self.name if self.request_id is not None: result['RequestId'] = self.request_id - if self.statistics is not None: - result['Statistics'] = self.statistics return result def from_map(self, m: dict = None): m = m or dict() + if m.get('ExtranetEndpoint') is not None: + self.extranet_endpoint = m.get('ExtranetEndpoint') + if m.get('IntranetEndpoint') is not None: + self.intranet_endpoint = m.get('IntranetEndpoint') + if m.get('Name') is not None: + self.name = m.get('Name') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Statistics') is not None: - self.statistics = m.get('Statistics') return self -class GetJobsStatisticsByResourceGroupResponse(TeaModel): +class GetAI4DDefaultBucketResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetJobsStatisticsByResourceGroupResponseBody = None, + body: GetAI4DDefaultBucketResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -6916,49 +7364,37 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: 
self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetJobsStatisticsByResourceGroupResponseBody() + temp_model = GetAI4DDefaultBucketResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetMachineGroupResponseBody(TeaModel): +class GetAlgorithmResponseBody(TeaModel): def __init__( self, - count: int = None, - default_driver: str = None, - duration: str = None, - ecs_type: str = None, - gmt_created: str = None, - gmt_expired: str = None, - gmt_modified: str = None, - gmt_started: str = None, - machine_group_id: str = None, - order_id: str = None, - pairesource_id: str = None, - pay_type: str = None, - pricing_cycle: str = None, - region_id: str = None, + algorithm_description: str = None, + algorithm_id: str = None, + algorithm_name: str = None, + algorithm_provider: str = None, + display_name: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, request_id: str = None, - status: str = None, - supported_drivers: List[str] = None, + tenant_id: str = None, + user_id: str = None, + workspace_id: str = None, ): - self.count = count - self.default_driver = default_driver - self.duration = duration - self.ecs_type = ecs_type - self.gmt_created = gmt_created - self.gmt_expired = gmt_expired - self.gmt_modified = gmt_modified - self.gmt_started = gmt_started - self.machine_group_id = machine_group_id - self.order_id = order_id - self.pairesource_id = pairesource_id - self.pay_type = pay_type - self.pricing_cycle = pricing_cycle - self.region_id = region_id + self.algorithm_description = algorithm_description + self.algorithm_id = algorithm_id + self.algorithm_name = algorithm_name + self.algorithm_provider = algorithm_provider + self.display_name = display_name + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time self.request_id = request_id - self.status = status - self.supported_drivers = supported_drivers + self.tenant_id = tenant_id + self.user_id = user_id + 
self.workspace_id = workspace_id def validate(self): pass @@ -6969,96 +7405,69 @@ def to_map(self): return _map result = dict() - if self.count is not None: - result['Count'] = self.count - if self.default_driver is not None: - result['DefaultDriver'] = self.default_driver - if self.duration is not None: - result['Duration'] = self.duration - if self.ecs_type is not None: - result['EcsType'] = self.ecs_type - if self.gmt_created is not None: - result['GmtCreated'] = self.gmt_created - if self.gmt_expired is not None: - result['GmtExpired'] = self.gmt_expired - if self.gmt_modified is not None: - result['GmtModified'] = self.gmt_modified - if self.gmt_started is not None: - result['GmtStarted'] = self.gmt_started - if self.machine_group_id is not None: - result['MachineGroupID'] = self.machine_group_id - if self.order_id is not None: - result['OrderID'] = self.order_id - if self.pairesource_id is not None: - result['PAIResourceID'] = self.pairesource_id - if self.pay_type is not None: - result['PayType'] = self.pay_type - if self.pricing_cycle is not None: - result['PricingCycle'] = self.pricing_cycle - if self.region_id is not None: - result['RegionID'] = self.region_id + if self.algorithm_description is not None: + result['AlgorithmDescription'] = self.algorithm_description + if self.algorithm_id is not None: + result['AlgorithmId'] = self.algorithm_id + if self.algorithm_name is not None: + result['AlgorithmName'] = self.algorithm_name + if self.algorithm_provider is not None: + result['AlgorithmProvider'] = self.algorithm_provider + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time if self.request_id is not None: result['RequestId'] = self.request_id - if self.status is not None: - result['Status'] = self.status - if self.supported_drivers is not None: - 
result['SupportedDrivers'] = self.supported_drivers + if self.tenant_id is not None: + result['TenantId'] = self.tenant_id + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Count') is not None: - self.count = m.get('Count') - if m.get('DefaultDriver') is not None: - self.default_driver = m.get('DefaultDriver') - if m.get('Duration') is not None: - self.duration = m.get('Duration') - if m.get('EcsType') is not None: - self.ecs_type = m.get('EcsType') - if m.get('GmtCreated') is not None: - self.gmt_created = m.get('GmtCreated') - if m.get('GmtExpired') is not None: - self.gmt_expired = m.get('GmtExpired') - if m.get('GmtModified') is not None: - self.gmt_modified = m.get('GmtModified') - if m.get('GmtStarted') is not None: - self.gmt_started = m.get('GmtStarted') - if m.get('MachineGroupID') is not None: - self.machine_group_id = m.get('MachineGroupID') - if m.get('OrderID') is not None: - self.order_id = m.get('OrderID') - if m.get('PAIResourceID') is not None: - self.pairesource_id = m.get('PAIResourceID') - if m.get('PayType') is not None: - self.pay_type = m.get('PayType') - if m.get('PricingCycle') is not None: - self.pricing_cycle = m.get('PricingCycle') - if m.get('RegionID') is not None: - self.region_id = m.get('RegionID') + if m.get('AlgorithmDescription') is not None: + self.algorithm_description = m.get('AlgorithmDescription') + if m.get('AlgorithmId') is not None: + self.algorithm_id = m.get('AlgorithmId') + if m.get('AlgorithmName') is not None: + self.algorithm_name = m.get('AlgorithmName') + if m.get('AlgorithmProvider') is not None: + self.algorithm_provider = m.get('AlgorithmProvider') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if 
m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('SupportedDrivers') is not None: - self.supported_drivers = m.get('SupportedDrivers') + if m.get('TenantId') is not None: + self.tenant_id = m.get('TenantId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetMachineGroupResponse(TeaModel): +class GetAlgorithmResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetMachineGroupResponseBody = None, + body: GetAlgorithmResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7083,28 +7492,37 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetMachineGroupResponseBody() + temp_model = GetAlgorithmResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetNodeMetricsRequest(TeaModel): +class GetAlgorithmVersionResponseBody(TeaModel): def __init__( self, - end_time: str = None, - gputype: str = None, - start_time: str = None, - time_step: str = None, - verbose: bool = None, + algorithm_id: str = None, + algorithm_name: str = None, + algorithm_provider: str = None, + algorithm_spec: AlgorithmSpec = None, + algorithm_version: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + tenant_id: str = None, + user_id: str = None, ): - self.end_time = end_time - self.gputype = gputype - self.start_time = start_time - 
self.time_step = time_step - self.verbose = verbose + self.algorithm_id = algorithm_id + self.algorithm_name = algorithm_name + self.algorithm_provider = algorithm_provider + self.algorithm_spec = algorithm_spec + self.algorithm_version = algorithm_version + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.tenant_id = tenant_id + self.user_id = user_id def validate(self): - pass + if self.algorithm_spec: + self.algorithm_spec.validate() def to_map(self): _map = super().to_map() @@ -7112,95 +7530,62 @@ def to_map(self): return _map result = dict() - if self.end_time is not None: - result['EndTime'] = self.end_time - if self.gputype is not None: - result['GPUType'] = self.gputype - if self.start_time is not None: - result['StartTime'] = self.start_time - if self.time_step is not None: - result['TimeStep'] = self.time_step - if self.verbose is not None: - result['Verbose'] = self.verbose - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('EndTime') is not None: - self.end_time = m.get('EndTime') - if m.get('GPUType') is not None: - self.gputype = m.get('GPUType') - if m.get('StartTime') is not None: - self.start_time = m.get('StartTime') - if m.get('TimeStep') is not None: - self.time_step = m.get('TimeStep') - if m.get('Verbose') is not None: - self.verbose = m.get('Verbose') - return self - - -class GetNodeMetricsResponseBody(TeaModel): - def __init__( - self, - metric_type: str = None, - nodes_metrics: List[NodeMetric] = None, - resource_group_id: str = None, - ): - self.metric_type = metric_type - self.nodes_metrics = nodes_metrics - self.resource_group_id = resource_group_id - - def validate(self): - if self.nodes_metrics: - for k in self.nodes_metrics: - if k: - k.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.metric_type is not None: - result['MetricType'] = self.metric_type - result['NodesMetrics'] = [] - if 
self.nodes_metrics is not None: - for k in self.nodes_metrics: - result['NodesMetrics'].append(k.to_map() if k else None) - if self.resource_group_id is not None: - result['ResourceGroupID'] = self.resource_group_id + if self.algorithm_id is not None: + result['AlgorithmId'] = self.algorithm_id + if self.algorithm_name is not None: + result['AlgorithmName'] = self.algorithm_name + if self.algorithm_provider is not None: + result['AlgorithmProvider'] = self.algorithm_provider + if self.algorithm_spec is not None: + result['AlgorithmSpec'] = self.algorithm_spec.to_map() + if self.algorithm_version is not None: + result['AlgorithmVersion'] = self.algorithm_version + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.tenant_id is not None: + result['TenantId'] = self.tenant_id + if self.user_id is not None: + result['UserId'] = self.user_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('MetricType') is not None: - self.metric_type = m.get('MetricType') - self.nodes_metrics = [] - if m.get('NodesMetrics') is not None: - for k in m.get('NodesMetrics'): - temp_model = NodeMetric() - self.nodes_metrics.append(temp_model.from_map(k)) - if m.get('ResourceGroupID') is not None: - self.resource_group_id = m.get('ResourceGroupID') + if m.get('AlgorithmId') is not None: + self.algorithm_id = m.get('AlgorithmId') + if m.get('AlgorithmName') is not None: + self.algorithm_name = m.get('AlgorithmName') + if m.get('AlgorithmProvider') is not None: + self.algorithm_provider = m.get('AlgorithmProvider') + if m.get('AlgorithmSpec') is not None: + temp_model = AlgorithmSpec() + self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec']) + if m.get('AlgorithmVersion') is not None: + self.algorithm_version = m.get('AlgorithmVersion') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') 
+ if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('TenantId') is not None: + self.tenant_id = m.get('TenantId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') return self -class GetNodeMetricsResponse(TeaModel): +class GetAlgorithmVersionResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetNodeMetricsResponseBody = None, + body: GetAlgorithmVersionResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7225,25 +7610,24 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetNodeMetricsResponseBody() + temp_model = GetAlgorithmVersionResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetNodeViewMetricsRequest(TeaModel): +class GetComponentResponseBodyVersions(TeaModel): def __init__( self, - node_id: str = None, - page_number: int = None, - page_size: int = None, - time_step: str = None, - workspace_id: str = None, + gmt_create_time: str = None, + snapshot_id: str = None, + status: str = None, + version: str = None, ): - self.node_id = node_id - self.page_number = page_number - self.page_size = page_size - self.time_step = time_step - self.workspace_id = workspace_id + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_create_time = gmt_create_time + self.snapshot_id = snapshot_id + self.status = status + self.version = version def validate(self): pass @@ -7254,45 +7638,69 @@ def to_map(self): return _map result = dict() - if self.node_id is not None: - result['NodeId'] = self.node_id - if self.page_number is not None: - result['PageNumber'] = 
self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.time_step is not None: - result['TimeStep'] = self.time_step - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.status is not None: + result['Status'] = self.status + if self.version is not None: + result['Version'] = self.version return result def from_map(self, m: dict = None): m = m or dict() - if m.get('NodeId') is not None: - self.node_id = m.get('NodeId') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('TimeStep') is not None: - self.time_step = m.get('TimeStep') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('SnapshotId') is not None: + self.snapshot_id = m.get('SnapshotId') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('Version') is not None: + self.version = m.get('Version') return self -class GetNodeViewMetricsResponseBody(TeaModel): +class GetComponentResponseBody(TeaModel): def __init__( self, - node_metrics: List[NodeViewMetric] = None, - total: int = None, + component_id: str = None, + description: str = None, + display_name: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + labels: List[Label] = None, + name: str = None, + provider: str = None, + request_id: str = None, + tenant_id: str = None, + user_id: str = None, + versions: List[GetComponentResponseBodyVersions] = None, + workspace_id: str = None, ): - self.node_metrics = node_metrics - self.total = total + self.component_id = component_id + self.description = description + self.display_name = 
display_name + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_create_time = gmt_create_time + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_modified_time = gmt_modified_time + self.labels = labels + self.name = name + self.provider = provider + self.request_id = request_id + self.tenant_id = tenant_id + self.user_id = user_id + self.versions = versions + self.workspace_id = workspace_id def validate(self): - if self.node_metrics: - for k in self.node_metrics: + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.versions: + for k in self.versions: if k: k.validate() @@ -7302,45 +7710,91 @@ def to_map(self): return _map result = dict() - result['NodeMetrics'] = [] - if self.node_metrics is not None: - for k in self.node_metrics: - result['NodeMetrics'].append(k.to_map() if k else None) - if self.total is not None: - result['Total'] = self.total + if self.component_id is not None: + result['ComponentId'] = self.component_id + if self.description is not None: + result['Description'] = self.description + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.provider is not None: + result['Provider'] = self.provider + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.tenant_id is not None: + result['TenantId'] = self.tenant_id + if self.user_id is not None: + result['UserId'] = self.user_id + result['Versions'] = [] + if self.versions is not None: + for k in self.versions: + result['Versions'].append(k.to_map() if k else None) + if self.workspace_id is not None: + result['WorkspaceId'] = 
self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - self.node_metrics = [] - if m.get('NodeMetrics') is not None: - for k in m.get('NodeMetrics'): - temp_model = NodeViewMetric() - self.node_metrics.append(temp_model.from_map(k)) - if m.get('Total') is not None: - self.total = m.get('Total') - return self - - -class GetNodeViewMetricsResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: GetNodeViewMetricsResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): + if m.get('ComponentId') is not None: + self.component_id = m.get('ComponentId') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('TenantId') is not None: + self.tenant_id = m.get('TenantId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + self.versions = [] + if m.get('Versions') is not None: + for k in m.get('Versions'): + temp_model = GetComponentResponseBodyVersions() + self.versions.append(temp_model.from_map(k)) + if m.get('WorkspaceId') is not None: + 
self.workspace_id = m.get('WorkspaceId') + return self + + +class GetComponentResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetComponentResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): _map = super().to_map() if _map is not None: return _map @@ -7361,48 +7815,51 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetNodeViewMetricsResponseBody() + temp_model = GetComponentResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetOperationResponseBody(TeaModel): +class GetComponentVersionResponseBody(TeaModel): def __init__( self, - creator_id: str = None, - gmt_created_time: str = None, - gmt_end_time: str = None, + description: str = None, + gmt_create_time: str = None, gmt_modified_time: str = None, - gmt_start_time: str = None, - object_id: str = None, - object_type: str = None, - operation_description: str = None, - operation_id: str = None, - operation_spec_json: str = None, - operation_type: str = None, - reason_code: str = None, - reason_message: str = None, + labels: List[Label] = None, + name: str = None, + provider: str = None, request_id: str = None, - status: str = None, + snapshot_id: str = None, + spec: ComponentSpec = None, + tenant_id: str = None, + user_id: str = None, + version: str = None, + workspace_id: str = None, ): - self.creator_id = creator_id - self.gmt_created_time = gmt_created_time - self.gmt_end_time = gmt_end_time + self.description = description + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_create_time = gmt_create_time + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.gmt_modified_time = gmt_modified_time - self.gmt_start_time = gmt_start_time - self.object_id = object_id - 
self.object_type = object_type - self.operation_description = operation_description - self.operation_id = operation_id - self.operation_spec_json = operation_spec_json - self.operation_type = operation_type - self.reason_code = reason_code - self.reason_message = reason_message + self.labels = labels + self.name = name + self.provider = provider self.request_id = request_id - self.status = status + self.snapshot_id = snapshot_id + self.spec = spec + self.tenant_id = tenant_id + self.user_id = user_id + self.version = version + self.workspace_id = workspace_id def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.spec: + self.spec.validate() def to_map(self): _map = super().to_map() @@ -7410,88 +7867,83 @@ def to_map(self): return _map result = dict() - if self.creator_id is not None: - result['CreatorId'] = self.creator_id - if self.gmt_created_time is not None: - result['GmtCreatedTime'] = self.gmt_created_time - if self.gmt_end_time is not None: - result['GmtEndTime'] = self.gmt_end_time + if self.description is not None: + result['Description'] = self.description + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time if self.gmt_modified_time is not None: result['GmtModifiedTime'] = self.gmt_modified_time - if self.gmt_start_time is not None: - result['GmtStartTime'] = self.gmt_start_time - if self.object_id is not None: - result['ObjectId'] = self.object_id - if self.object_type is not None: - result['ObjectType'] = self.object_type - if self.operation_description is not None: - result['OperationDescription'] = self.operation_description - if self.operation_id is not None: - result['OperationId'] = self.operation_id - if self.operation_spec_json is not None: - result['OperationSpecJson'] = self.operation_spec_json - if self.operation_type is not None: - result['OperationType'] = self.operation_type - if self.reason_code is not None: - result['ReasonCode'] = self.reason_code - if 
self.reason_message is not None: - result['ReasonMessage'] = self.reason_message + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.provider is not None: + result['Provider'] = self.provider if self.request_id is not None: result['RequestId'] = self.request_id - if self.status is not None: - result['Status'] = self.status + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.spec is not None: + result['Spec'] = self.spec.to_map() + if self.tenant_id is not None: + result['TenantId'] = self.tenant_id + if self.user_id is not None: + result['UserId'] = self.user_id + if self.version is not None: + result['Version'] = self.version + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CreatorId') is not None: - self.creator_id = m.get('CreatorId') - if m.get('GmtCreatedTime') is not None: - self.gmt_created_time = m.get('GmtCreatedTime') - if m.get('GmtEndTime') is not None: - self.gmt_end_time = m.get('GmtEndTime') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') if m.get('GmtModifiedTime') is not None: self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('GmtStartTime') is not None: - self.gmt_start_time = m.get('GmtStartTime') - if m.get('ObjectId') is not None: - self.object_id = m.get('ObjectId') - if m.get('ObjectType') is not None: - self.object_type = m.get('ObjectType') - if m.get('OperationDescription') is not None: - self.operation_description = m.get('OperationDescription') - if m.get('OperationId') is not None: - self.operation_id = m.get('OperationId') - if m.get('OperationSpecJson') is not None: - self.operation_spec_json = 
m.get('OperationSpecJson') - if m.get('OperationType') is not None: - self.operation_type = m.get('OperationType') - if m.get('ReasonCode') is not None: - self.reason_code = m.get('ReasonCode') - if m.get('ReasonMessage') is not None: - self.reason_message = m.get('ReasonMessage') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Provider') is not None: + self.provider = m.get('Provider') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Status') is not None: - self.status = m.get('Status') + if m.get('SnapshotId') is not None: + self.snapshot_id = m.get('SnapshotId') + if m.get('Spec') is not None: + temp_model = ComponentSpec() + self.spec = temp_model.from_map(m['Spec']) + if m.get('TenantId') is not None: + self.tenant_id = m.get('TenantId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('Version') is not None: + self.version = m.get('Version') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetOperationResponse(TeaModel): +class GetComponentVersionResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetOperationResponseBody = None, + body: GetComponentVersionResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7516,77 +7968,40 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetOperationResponseBody() + temp_model = GetComponentVersionResponseBody() self.body = 
temp_model.from_map(m['body']) return self -class GetQuotaResponseBody(TeaModel): +class GetComponentVersionSnapshotResponseBody(TeaModel): def __init__( self, - allocate_strategy: str = None, - creator_id: str = None, - description: str = None, - gmt_created_time: str = None, + component_id: str = None, + gmt_create_time: str = None, gmt_modified_time: str = None, - labels: List[Label] = None, - latest_operation_id: str = None, - min: AllocateStrategySpec = None, - parent_quota_id: str = None, - quota_config: QuotaConfig = None, - quota_details: QuotaDetails = None, - quota_id: str = None, - quota_name: str = None, - reason_code: str = None, - reason_message: str = None, + is_current_version: bool = None, request_id: str = None, - resource_group_ids: List[str] = None, - resource_type: str = None, - status: str = None, - sub_quotas: List[QuotaIdName] = None, - workspaces: List[WorkspaceIdName] = None, + snapshot_id: str = None, + tenant_id: str = None, + user_id: str = None, + version: str = None, + workspace_id: str = None, ): - self.allocate_strategy = allocate_strategy - self.creator_id = creator_id - self.description = description - self.gmt_created_time = gmt_created_time + self.component_id = component_id + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_create_time = gmt_create_time + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.gmt_modified_time = gmt_modified_time - self.labels = labels - self.latest_operation_id = latest_operation_id - self.min = min - self.parent_quota_id = parent_quota_id - self.quota_config = quota_config - self.quota_details = quota_details - self.quota_id = quota_id - self.quota_name = quota_name - self.reason_code = reason_code - self.reason_message = reason_message + self.is_current_version = is_current_version self.request_id = request_id - self.resource_group_ids = resource_group_ids - self.resource_type = resource_type - self.status = status - self.sub_quotas = sub_quotas - self.workspaces = workspaces + 
self.snapshot_id = snapshot_id + self.tenant_id = tenant_id + self.user_id = user_id + self.version = version + self.workspace_id = workspace_id def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - if self.min: - self.min.validate() - if self.quota_config: - self.quota_config.validate() - if self.quota_details: - self.quota_details.validate() - if self.sub_quotas: - for k in self.sub_quotas: - if k: - k.validate() - if self.workspaces: - for k in self.workspaces: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -7594,130 +8009,65 @@ def to_map(self): return _map result = dict() - if self.allocate_strategy is not None: - result['AllocateStrategy'] = self.allocate_strategy - if self.creator_id is not None: - result['CreatorId'] = self.creator_id - if self.description is not None: - result['Description'] = self.description - if self.gmt_created_time is not None: - result['GmtCreatedTime'] = self.gmt_created_time + if self.component_id is not None: + result['ComponentId'] = self.component_id + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time if self.gmt_modified_time is not None: result['GmtModifiedTime'] = self.gmt_modified_time - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.latest_operation_id is not None: - result['LatestOperationId'] = self.latest_operation_id - if self.min is not None: - result['Min'] = self.min.to_map() - if self.parent_quota_id is not None: - result['ParentQuotaId'] = self.parent_quota_id - if self.quota_config is not None: - result['QuotaConfig'] = self.quota_config.to_map() - if self.quota_details is not None: - result['QuotaDetails'] = self.quota_details.to_map() - if self.quota_id is not None: - result['QuotaId'] = self.quota_id - if self.quota_name is not None: - result['QuotaName'] = self.quota_name - if self.reason_code is not None: - 
result['ReasonCode'] = self.reason_code - if self.reason_message is not None: - result['ReasonMessage'] = self.reason_message + if self.is_current_version is not None: + result['IsCurrentVersion'] = self.is_current_version if self.request_id is not None: result['RequestId'] = self.request_id - if self.resource_group_ids is not None: - result['ResourceGroupIds'] = self.resource_group_ids - if self.resource_type is not None: - result['ResourceType'] = self.resource_type - if self.status is not None: - result['Status'] = self.status - result['SubQuotas'] = [] - if self.sub_quotas is not None: - for k in self.sub_quotas: - result['SubQuotas'].append(k.to_map() if k else None) - result['Workspaces'] = [] - if self.workspaces is not None: - for k in self.workspaces: - result['Workspaces'].append(k.to_map() if k else None) + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.tenant_id is not None: + result['TenantId'] = self.tenant_id + if self.user_id is not None: + result['UserId'] = self.user_id + if self.version is not None: + result['Version'] = self.version + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AllocateStrategy') is not None: - self.allocate_strategy = m.get('AllocateStrategy') - if m.get('CreatorId') is not None: - self.creator_id = m.get('CreatorId') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('GmtCreatedTime') is not None: - self.gmt_created_time = m.get('GmtCreatedTime') + if m.get('ComponentId') is not None: + self.component_id = m.get('ComponentId') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') if m.get('GmtModifiedTime') is not None: self.gmt_modified_time = m.get('GmtModifiedTime') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - 
self.labels.append(temp_model.from_map(k)) - if m.get('LatestOperationId') is not None: - self.latest_operation_id = m.get('LatestOperationId') - if m.get('Min') is not None: - temp_model = AllocateStrategySpec() - self.min = temp_model.from_map(m['Min']) - if m.get('ParentQuotaId') is not None: - self.parent_quota_id = m.get('ParentQuotaId') - if m.get('QuotaConfig') is not None: - temp_model = QuotaConfig() - self.quota_config = temp_model.from_map(m['QuotaConfig']) - if m.get('QuotaDetails') is not None: - temp_model = QuotaDetails() - self.quota_details = temp_model.from_map(m['QuotaDetails']) - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') - if m.get('QuotaName') is not None: - self.quota_name = m.get('QuotaName') - if m.get('ReasonCode') is not None: - self.reason_code = m.get('ReasonCode') - if m.get('ReasonMessage') is not None: - self.reason_message = m.get('ReasonMessage') + if m.get('IsCurrentVersion') is not None: + self.is_current_version = m.get('IsCurrentVersion') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('ResourceGroupIds') is not None: - self.resource_group_ids = m.get('ResourceGroupIds') - if m.get('ResourceType') is not None: - self.resource_type = m.get('ResourceType') - if m.get('Status') is not None: - self.status = m.get('Status') - self.sub_quotas = [] - if m.get('SubQuotas') is not None: - for k in m.get('SubQuotas'): - temp_model = QuotaIdName() - self.sub_quotas.append(temp_model.from_map(k)) - self.workspaces = [] - if m.get('Workspaces') is not None: - for k in m.get('Workspaces'): - temp_model = WorkspaceIdName() - self.workspaces.append(temp_model.from_map(k)) + if m.get('SnapshotId') is not None: + self.snapshot_id = m.get('SnapshotId') + if m.get('TenantId') is not None: + self.tenant_id = m.get('TenantId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('Version') is not None: + self.version = m.get('Version') + if m.get('WorkspaceId') is 
not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetQuotaResponse(TeaModel): +class GetComponentVersionSnapshotResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetQuotaResponseBody = None, + body: GetComponentVersionSnapshotResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7742,30 +8092,35 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetQuotaResponseBody() + temp_model = GetComponentVersionSnapshotResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetQuotaJobViewMetricsRequest(TeaModel): +class GetInstanceJobResponseBody(TeaModel): def __init__( self, - end_time: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, - sort_by: str = None, - start_time: str = None, - time_step: str = None, + creator: str = None, + gmt_create_time: str = None, + instance_id: str = None, + instance_job_id: str = None, + instance_job_type: str = None, + reason_code: str = None, + reason_message: str = None, + request_id: str = None, + status: str = None, workspace_id: str = None, ): - self.end_time = end_time - self.order = order - self.page_number = page_number - self.page_size = page_size - self.sort_by = sort_by - self.start_time = start_time - self.time_step = time_step + self.creator = creator + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_create_time = gmt_create_time + self.instance_id = instance_id + self.instance_job_id = instance_job_id + self.instance_job_type = instance_job_type + self.reason_code = reason_code + self.reason_message = reason_message + self.request_id 
= request_id + self.status = status self.workspace_id = workspace_id def validate(self): @@ -7777,122 +8132,65 @@ def to_map(self): return _map result = dict() - if self.end_time is not None: - result['EndTime'] = self.end_time - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.start_time is not None: - result['StartTime'] = self.start_time - if self.time_step is not None: - result['TimeStep'] = self.time_step + if self.creator is not None: + result['Creator'] = self.creator + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.instance_job_id is not None: + result['InstanceJobId'] = self.instance_job_id + if self.instance_job_type is not None: + result['InstanceJobType'] = self.instance_job_type + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.status is not None: + result['Status'] = self.status if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('EndTime') is not None: - self.end_time = m.get('EndTime') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('StartTime') is not None: - self.start_time = m.get('StartTime') - if m.get('TimeStep') is not None: - self.time_step = 
m.get('TimeStep') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - return self - - -class GetQuotaJobViewMetricsResponseBody(TeaModel): - def __init__( - self, - job_metrics: List[QuotaJobViewMetric] = None, - quota_id: str = None, - request_id: str = None, - summary: QuotaJobViewMetric = None, - total_count: int = None, - ): - self.job_metrics = job_metrics - self.quota_id = quota_id - self.request_id = request_id - self.summary = summary - self.total_count = total_count - - def validate(self): - if self.job_metrics: - for k in self.job_metrics: - if k: - k.validate() - if self.summary: - self.summary.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - result['JobMetrics'] = [] - if self.job_metrics is not None: - for k in self.job_metrics: - result['JobMetrics'].append(k.to_map() if k else None) - if self.quota_id is not None: - result['QuotaId'] = self.quota_id - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.summary is not None: - result['Summary'] = self.summary.to_map() - if self.total_count is not None: - result['TotalCount'] = self.total_count - return result - - def from_map(self, m: dict = None): - m = m or dict() - self.job_metrics = [] - if m.get('JobMetrics') is not None: - for k in m.get('JobMetrics'): - temp_model = QuotaJobViewMetric() - self.job_metrics.append(temp_model.from_map(k)) - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') + if m.get('Creator') is not None: + self.creator = m.get('Creator') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('InstanceJobId') is not None: + self.instance_job_id = m.get('InstanceJobId') + if m.get('InstanceJobType') is not None: + self.instance_job_type = m.get('InstanceJobType') + if m.get('ReasonCode') is not None: + 
self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Summary') is not None: - temp_model = QuotaJobViewMetric() - self.summary = temp_model.from_map(m['Summary']) - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetQuotaJobViewMetricsResponse(TeaModel): +class GetInstanceJobResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetQuotaJobViewMetricsResponseBody = None, + body: GetInstanceJobResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -7917,23 +8215,29 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetQuotaJobViewMetricsResponseBody() + temp_model = GetInstanceJobResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetQuotaMetricsRequest(TeaModel): +class GetJobViewMetricsRequest(TeaModel): def __init__( self, end_time: str = None, - gputype: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, start_time: str = None, time_step: str = None, + workspace_id: str = None, ): self.end_time = end_time - self.gputype = gputype + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by self.start_time = start_time self.time_step = time_step + self.workspace_id = workspace_id def validate(self): pass @@ -7946,43 +8250,59 @@ 
def to_map(self): result = dict() if self.end_time is not None: result['EndTime'] = self.end_time - if self.gputype is not None: - result['GPUType'] = self.gputype + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by if self.start_time is not None: result['StartTime'] = self.start_time if self.time_step is not None: result['TimeStep'] = self.time_step + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() if m.get('EndTime') is not None: self.end_time = m.get('EndTime') - if m.get('GPUType') is not None: - self.gputype = m.get('GPUType') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') if m.get('StartTime') is not None: self.start_time = m.get('StartTime') if m.get('TimeStep') is not None: self.time_step = m.get('TimeStep') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetQuotaMetricsResponseBody(TeaModel): +class GetJobViewMetricsResponseBody(TeaModel): def __init__( self, - quota_id: str = None, - quota_metrics: List[QuotaMetric] = None, + job_metrics: List[JobViewMetric] = None, request_id: str = None, + summary: JobViewMetric = None, + total: int = None, ): - self.quota_id = quota_id - self.quota_metrics = quota_metrics + self.job_metrics = job_metrics self.request_id = request_id + self.summary = summary + self.total = total def validate(self): - if self.quota_metrics: - for k in self.quota_metrics: + if self.job_metrics: + for k in self.job_metrics: if k: k.validate() + if self.summary: + self.summary.validate() def to_map(self): _map = super().to_map() @@ -7990,45 +8310,47 @@ def 
to_map(self): return _map result = dict() - if self.quota_id is not None: - result['QuotaId'] = self.quota_id - result['QuotaMetrics'] = [] - if self.quota_metrics is not None: - for k in self.quota_metrics: - result['QuotaMetrics'].append(k.to_map() if k else None) + result['JobMetrics'] = [] + if self.job_metrics is not None: + for k in self.job_metrics: + result['JobMetrics'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.summary is not None: + result['Summary'] = self.summary.to_map() + if self.total is not None: + result['Total'] = self.total return result def from_map(self, m: dict = None): m = m or dict() - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') - self.quota_metrics = [] - if m.get('QuotaMetrics') is not None: - for k in m.get('QuotaMetrics'): - temp_model = QuotaMetric() - self.quota_metrics.append(temp_model.from_map(k)) + self.job_metrics = [] + if m.get('JobMetrics') is not None: + for k in m.get('JobMetrics'): + temp_model = JobViewMetric() + self.job_metrics.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('Summary') is not None: + temp_model = JobViewMetric() + self.summary = temp_model.from_map(m['Summary']) + if m.get('Total') is not None: + self.total = m.get('Total') return self -class GetQuotaMetricsResponse(TeaModel): +class GetJobViewMetricsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetQuotaMetricsResponseBody = None, + body: GetJobViewMetricsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -8053,25 +8375,21 @@ def from_map(self, m: dict = None): if m.get('statusCode') is 
not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetQuotaMetricsResponseBody() + temp_model = GetJobViewMetricsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetQuotaNodeMetricsRequest(TeaModel): +class GetJobsStatisticsByQuotaRequest(TeaModel): def __init__( self, end_time: str = None, - gputype: str = None, start_time: str = None, - time_step: str = None, - verbose: bool = None, + workspace_id: str = None, ): self.end_time = end_time - self.gputype = gputype self.start_time = start_time - self.time_step = time_step - self.verbose = verbose + self.workspace_id = workspace_id def validate(self): pass @@ -8084,49 +8402,36 @@ def to_map(self): result = dict() if self.end_time is not None: result['EndTime'] = self.end_time - if self.gputype is not None: - result['GPUType'] = self.gputype if self.start_time is not None: result['StartTime'] = self.start_time - if self.time_step is not None: - result['TimeStep'] = self.time_step - if self.verbose is not None: - result['Verbose'] = self.verbose + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() if m.get('EndTime') is not None: self.end_time = m.get('EndTime') - if m.get('GPUType') is not None: - self.gputype = m.get('GPUType') if m.get('StartTime') is not None: self.start_time = m.get('StartTime') - if m.get('TimeStep') is not None: - self.time_step = m.get('TimeStep') - if m.get('Verbose') is not None: - self.verbose = m.get('Verbose') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetQuotaNodeMetricsResponseBody(TeaModel): +class GetJobsStatisticsByQuotaResponseBody(TeaModel): def __init__( self, - metric_type: str = None, - nodes_metrics: List[NodeMetric] = None, quota_id: str = None, request_id: str = None, + statistics: Dict[str, Any] = None, ): - self.metric_type = metric_type - 
self.nodes_metrics = nodes_metrics self.quota_id = quota_id self.request_id = request_id + self.statistics = statistics def validate(self): - if self.nodes_metrics: - for k in self.nodes_metrics: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -8134,49 +8439,37 @@ def to_map(self): return _map result = dict() - if self.metric_type is not None: - result['MetricType'] = self.metric_type - result['NodesMetrics'] = [] - if self.nodes_metrics is not None: - for k in self.nodes_metrics: - result['NodesMetrics'].append(k.to_map() if k else None) if self.quota_id is not None: result['QuotaId'] = self.quota_id if self.request_id is not None: result['RequestId'] = self.request_id + if self.statistics is not None: + result['Statistics'] = self.statistics return result def from_map(self, m: dict = None): m = m or dict() - if m.get('MetricType') is not None: - self.metric_type = m.get('MetricType') - self.nodes_metrics = [] - if m.get('NodesMetrics') is not None: - for k in m.get('NodesMetrics'): - temp_model = NodeMetric() - self.nodes_metrics.append(temp_model.from_map(k)) if m.get('QuotaId') is not None: self.quota_id = m.get('QuotaId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('Statistics') is not None: + self.statistics = m.get('Statistics') return self -class GetQuotaNodeMetricsResponse(TeaModel): +class GetJobsStatisticsByQuotaResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetQuotaNodeMetricsResponseBody = None, + body: GetJobsStatisticsByQuotaResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -8201,24 +8494,20 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: 
self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetQuotaNodeMetricsResponseBody() + temp_model = GetJobsStatisticsByQuotaResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetQuotaNodeViewMetricsRequest(TeaModel): +class GetJobsStatisticsByResourceGroupRequest(TeaModel): def __init__( self, - node_id: str = None, - page_number: int = None, - page_size: int = None, - time_step: str = None, + end_time: str = None, + start_time: str = None, workspace_id: str = None, ): - self.node_id = node_id - self.page_number = page_number - self.page_size = page_size - self.time_step = time_step + self.end_time = end_time + self.start_time = start_time self.workspace_id = workspace_id def validate(self): @@ -8230,51 +8519,36 @@ def to_map(self): return _map result = dict() - if self.node_id is not None: - result['NodeId'] = self.node_id - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.time_step is not None: - result['TimeStep'] = self.time_step + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.start_time is not None: + result['StartTime'] = self.start_time if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + result['WorkspaceID'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('NodeId') is not None: - self.node_id = m.get('NodeId') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('TimeStep') is not None: - self.time_step = m.get('TimeStep') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') + if m.get('WorkspaceID') is not 
None: + self.workspace_id = m.get('WorkspaceID') return self -class GetQuotaNodeViewMetricsResponseBody(TeaModel): +class GetJobsStatisticsByResourceGroupResponseBody(TeaModel): def __init__( self, - node_metrics: List[NodeViewMetric] = None, - quota_id: str = None, request_id: str = None, - total_count: int = None, + statistics: Dict[str, Any] = None, ): - self.node_metrics = node_metrics - self.quota_id = quota_id self.request_id = request_id - self.total_count = total_count + self.statistics = statistics def validate(self): - if self.node_metrics: - for k in self.node_metrics: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -8282,49 +8556,33 @@ def to_map(self): return _map result = dict() - result['NodeMetrics'] = [] - if self.node_metrics is not None: - for k in self.node_metrics: - result['NodeMetrics'].append(k.to_map() if k else None) - if self.quota_id is not None: - result['QuotaId'] = self.quota_id if self.request_id is not None: result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count + if self.statistics is not None: + result['Statistics'] = self.statistics return result def from_map(self, m: dict = None): m = m or dict() - self.node_metrics = [] - if m.get('NodeMetrics') is not None: - for k in m.get('NodeMetrics'): - temp_model = NodeViewMetric() - self.node_metrics.append(temp_model.from_map(k)) - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + if m.get('Statistics') is not None: + self.statistics = m.get('Statistics') return self -class GetQuotaNodeViewMetricsResponse(TeaModel): +class GetJobsStatisticsByResourceGroupResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetQuotaNodeViewMetricsResponseBody = None, + body: 
GetJobsStatisticsByResourceGroupResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -8349,31 +8607,51 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetQuotaNodeViewMetricsResponseBody() + temp_model = GetJobsStatisticsByResourceGroupResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetQuotaRangeUserViewMetricsRequest(TeaModel): +class GetMachineGroupResponseBody(TeaModel): def __init__( self, - end_time: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, - sort_by: str = None, - start_time: str = None, - user_id: str = None, - workspace_id: str = None, + count: int = None, + default_driver: str = None, + duration: str = None, + ecs_type: str = None, + gmt_created: str = None, + gmt_expired: str = None, + gmt_modified: str = None, + gmt_started: str = None, + machine_group_id: str = None, + order_id: str = None, + order_instance_id: str = None, + pairesource_id: str = None, + pay_type: str = None, + pricing_cycle: str = None, + region_id: str = None, + request_id: str = None, + status: str = None, + supported_drivers: List[str] = None, ): - self.end_time = end_time - self.order = order - self.page_number = page_number - self.page_size = page_size - self.sort_by = sort_by - self.start_time = start_time - self.user_id = user_id - self.workspace_id = workspace_id + self.count = count + self.default_driver = default_driver + self.duration = duration + self.ecs_type = ecs_type + self.gmt_created = gmt_created + self.gmt_expired = gmt_expired + self.gmt_modified = gmt_modified + self.gmt_started = gmt_started + self.machine_group_id = machine_group_id + 
self.order_id = order_id + self.order_instance_id = order_instance_id + self.pairesource_id = pairesource_id + self.pay_type = pay_type + self.pricing_cycle = pricing_cycle + self.region_id = region_id + self.request_id = request_id + self.status = status + self.supported_drivers = supported_drivers def validate(self): pass @@ -8384,67 +8662,99 @@ def to_map(self): return _map result = dict() - if self.end_time is not None: - result['EndTime'] = self.end_time - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.start_time is not None: - result['StartTime'] = self.start_time - if self.user_id is not None: - result['UserId'] = self.user_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.count is not None: + result['Count'] = self.count + if self.default_driver is not None: + result['DefaultDriver'] = self.default_driver + if self.duration is not None: + result['Duration'] = self.duration + if self.ecs_type is not None: + result['EcsType'] = self.ecs_type + if self.gmt_created is not None: + result['GmtCreated'] = self.gmt_created + if self.gmt_expired is not None: + result['GmtExpired'] = self.gmt_expired + if self.gmt_modified is not None: + result['GmtModified'] = self.gmt_modified + if self.gmt_started is not None: + result['GmtStarted'] = self.gmt_started + if self.machine_group_id is not None: + result['MachineGroupID'] = self.machine_group_id + if self.order_id is not None: + result['OrderID'] = self.order_id + if self.order_instance_id is not None: + result['OrderInstanceId'] = self.order_instance_id + if self.pairesource_id is not None: + result['PAIResourceID'] = self.pairesource_id + if self.pay_type is not None: + result['PayType'] = self.pay_type + if self.pricing_cycle is not None: + 
result['PricingCycle'] = self.pricing_cycle + if self.region_id is not None: + result['RegionID'] = self.region_id + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.status is not None: + result['Status'] = self.status + if self.supported_drivers is not None: + result['SupportedDrivers'] = self.supported_drivers return result def from_map(self, m: dict = None): m = m or dict() - if m.get('EndTime') is not None: - self.end_time = m.get('EndTime') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('StartTime') is not None: - self.start_time = m.get('StartTime') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('Count') is not None: + self.count = m.get('Count') + if m.get('DefaultDriver') is not None: + self.default_driver = m.get('DefaultDriver') + if m.get('Duration') is not None: + self.duration = m.get('Duration') + if m.get('EcsType') is not None: + self.ecs_type = m.get('EcsType') + if m.get('GmtCreated') is not None: + self.gmt_created = m.get('GmtCreated') + if m.get('GmtExpired') is not None: + self.gmt_expired = m.get('GmtExpired') + if m.get('GmtModified') is not None: + self.gmt_modified = m.get('GmtModified') + if m.get('GmtStarted') is not None: + self.gmt_started = m.get('GmtStarted') + if m.get('MachineGroupID') is not None: + self.machine_group_id = m.get('MachineGroupID') + if m.get('OrderID') is not None: + self.order_id = m.get('OrderID') + if m.get('OrderInstanceId') is not None: + self.order_instance_id = m.get('OrderInstanceId') + if m.get('PAIResourceID') is not None: + self.pairesource_id = m.get('PAIResourceID') + if m.get('PayType') is not None: + self.pay_type = 
m.get('PayType') + if m.get('PricingCycle') is not None: + self.pricing_cycle = m.get('PricingCycle') + if m.get('RegionID') is not None: + self.region_id = m.get('RegionID') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('SupportedDrivers') is not None: + self.supported_drivers = m.get('SupportedDrivers') return self -class GetQuotaRangeUserViewMetricsResponseBody(TeaModel): +class GetMachineGroupResponse(TeaModel): def __init__( self, - quota_id: str = None, - request_id: str = None, - summary: QuotaUserViewMetric = None, - total_count: int = None, - user_metrics: List[QuotaUserViewMetric] = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetMachineGroupResponseBody = None, ): - self.quota_id = quota_id - self.request_id = request_id - self.summary = summary - self.total_count = total_count - self.user_metrics = user_metrics + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - if self.summary: - self.summary.validate() - if self.user_metrics: - for k in self.user_metrics: - if k: - k.validate() + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -8452,69 +8762,12 @@ def to_map(self): return _map result = dict() - if self.quota_id is not None: - result['QuotaId'] = self.quota_id - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.summary is not None: - result['Summary'] = self.summary.to_map() - if self.total_count is not None: - result['TotalCount'] = self.total_count - result['UserMetrics'] = [] - if self.user_metrics is not None: - for k in self.user_metrics: - result['UserMetrics'].append(k.to_map() if k else None) - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') - if m.get('RequestId') is not None: - self.request_id = 
m.get('RequestId') - if m.get('Summary') is not None: - temp_model = QuotaUserViewMetric() - self.summary = temp_model.from_map(m['Summary']) - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') - self.user_metrics = [] - if m.get('UserMetrics') is not None: - for k in m.get('UserMetrics'): - temp_model = QuotaUserViewMetric() - self.user_metrics.append(temp_model.from_map(k)) - return self - - -class GetQuotaRangeUserViewMetricsResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: GetQuotaRangeUserViewMetricsResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): @@ -8524,29 +8777,35 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetQuotaRangeUserViewMetricsResponseBody() + temp_model = GetMachineGroupResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetQuotaUserViewMetricsRequest(TeaModel): +class GetMetricsRequest(TeaModel): def __init__( self, - order: str = None, - page_number: str = None, - page_size: str = None, - 
sort_by: str = None, - time_step: str = None, - user_id: str = None, - workspace_id: str = None, + dimensions: str = None, + end_time: str = None, + express: str = None, + length: str = None, + metric_name: str = None, + namespace: str = None, + next_token: str = None, + period: str = None, + start_time: str = None, ): - self.order = order - self.page_number = page_number - self.page_size = page_size - self.sort_by = sort_by - self.time_step = time_step - self.user_id = user_id - self.workspace_id = workspace_id + # This parameter is required. + self.dimensions = dimensions + self.end_time = end_time + self.express = express + self.length = length + # This parameter is required. + self.metric_name = metric_name + self.namespace = namespace + self.next_token = next_token + self.period = period + self.start_time = start_time def validate(self): pass @@ -8557,63 +8816,70 @@ def to_map(self): return _map result = dict() - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.time_step is not None: - result['TimeStep'] = self.time_step - if self.user_id is not None: - result['UserId'] = self.user_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.dimensions is not None: + result['Dimensions'] = self.dimensions + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.express is not None: + result['Express'] = self.express + if self.length is not None: + result['Length'] = self.length + if self.metric_name is not None: + result['MetricName'] = self.metric_name + if self.namespace is not None: + result['Namespace'] = self.namespace + if self.next_token is not None: + result['NextToken'] = self.next_token + if self.period is not None: + result['Period'] = self.period + if self.start_time 
is not None: + result['StartTime'] = self.start_time return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('TimeStep') is not None: - self.time_step = m.get('TimeStep') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('Dimensions') is not None: + self.dimensions = m.get('Dimensions') + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('Express') is not None: + self.express = m.get('Express') + if m.get('Length') is not None: + self.length = m.get('Length') + if m.get('MetricName') is not None: + self.metric_name = m.get('MetricName') + if m.get('Namespace') is not None: + self.namespace = m.get('Namespace') + if m.get('NextToken') is not None: + self.next_token = m.get('NextToken') + if m.get('Period') is not None: + self.period = m.get('Period') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') return self -class GetQuotaUserViewMetricsResponseBody(TeaModel): +class GetMetricsResponseBody(TeaModel): def __init__( self, - quota_id: str = None, + code: str = None, + datapoints: str = None, + message: str = None, + next_token: str = None, + period: str = None, request_id: str = None, - summary: QuotaUserViewMetric = None, - total_count: int = None, - user_metrics: List[QuotaUserViewMetric] = None, + success: bool = None, ): - self.quota_id = quota_id + self.code = code + self.datapoints = datapoints + self.message = message + self.next_token = next_token + self.period = period self.request_id = request_id - self.summary = summary - self.total_count = total_count - self.user_metrics = user_metrics + 
self.success = success def validate(self): - if self.summary: - self.summary.validate() - if self.user_metrics: - for k in self.user_metrics: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -8621,54 +8887,53 @@ def to_map(self): return _map result = dict() - if self.quota_id is not None: - result['QuotaId'] = self.quota_id + if self.code is not None: + result['Code'] = self.code + if self.datapoints is not None: + result['Datapoints'] = self.datapoints + if self.message is not None: + result['Message'] = self.message + if self.next_token is not None: + result['NextToken'] = self.next_token + if self.period is not None: + result['Period'] = self.period if self.request_id is not None: result['RequestId'] = self.request_id - if self.summary is not None: - result['Summary'] = self.summary.to_map() - if self.total_count is not None: - result['TotalCount'] = self.total_count - result['UserMetrics'] = [] - if self.user_metrics is not None: - for k in self.user_metrics: - result['UserMetrics'].append(k.to_map() if k else None) + if self.success is not None: + result['Success'] = self.success return result def from_map(self, m: dict = None): m = m or dict() - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('Datapoints') is not None: + self.datapoints = m.get('Datapoints') + if m.get('Message') is not None: + self.message = m.get('Message') + if m.get('NextToken') is not None: + self.next_token = m.get('NextToken') + if m.get('Period') is not None: + self.period = m.get('Period') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Summary') is not None: - temp_model = QuotaUserViewMetric() - self.summary = temp_model.from_map(m['Summary']) - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') - self.user_metrics = [] - if m.get('UserMetrics') is not None: - for k in m.get('UserMetrics'): - temp_model = 
QuotaUserViewMetric() - self.user_metrics.append(temp_model.from_map(k)) + if m.get('Success') is not None: + self.success = m.get('Success') return self -class GetQuotaUserViewMetricsResponse(TeaModel): +class GetMetricsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetQuotaUserViewMetricsResponseBody = None, + body: GetMetricsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -8693,31 +8958,25 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetQuotaUserViewMetricsResponseBody() + temp_model = GetMetricsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetRangeUserViewMetricsRequest(TeaModel): +class GetNodeGPUMetricsRequest(TeaModel): def __init__( self, end_time: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, - sort_by: str = None, + metric_type: str = None, + quota_id: str = None, start_time: str = None, - user_id: str = None, - workspace_id: str = None, ): self.end_time = end_time - self.order = order - self.page_number = page_number - self.page_size = page_size - self.sort_by = sort_by + # This parameter is required. + self.metric_type = metric_type + # This parameter is required. 
+ self.quota_id = quota_id self.start_time = start_time - self.user_id = user_id - self.workspace_id = workspace_id def validate(self): pass @@ -8730,61 +8989,41 @@ def to_map(self): result = dict() if self.end_time is not None: result['EndTime'] = self.end_time - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.sort_by is not None: - result['SortBy'] = self.sort_by + if self.metric_type is not None: + result['MetricType'] = self.metric_type + if self.quota_id is not None: + result['QuotaId'] = self.quota_id if self.start_time is not None: result['StartTime'] = self.start_time - if self.user_id is not None: - result['UserId'] = self.user_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() if m.get('EndTime') is not None: self.end_time = m.get('EndTime') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') + if m.get('MetricType') is not None: + self.metric_type = m.get('MetricType') + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') if m.get('StartTime') is not None: self.start_time = m.get('StartTime') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') return self -class GetRangeUserViewMetricsResponseBody(TeaModel): +class GetNodeGPUMetricsResponseBody(TeaModel): def __init__( self, - summary: UserViewMetric = None, - user_metrics: List[UserViewMetric] = None, + metric_type: str = None, + node_gpumetric: NodeGPUMetric = None, request_id: str = None, ): - 
self.summary = summary - self.user_metrics = user_metrics + self.metric_type = metric_type + self.node_gpumetric = node_gpumetric self.request_id = request_id def validate(self): - if self.summary: - self.summary.validate() - if self.user_metrics: - for k in self.user_metrics: - if k: - k.validate() + if self.node_gpumetric: + self.node_gpumetric.validate() def to_map(self): _map = super().to_map() @@ -8792,46 +9031,38 @@ def to_map(self): return _map result = dict() - if self.summary is not None: - result['Summary'] = self.summary.to_map() - result['UserMetrics'] = [] - if self.user_metrics is not None: - for k in self.user_metrics: - result['UserMetrics'].append(k.to_map() if k else None) + if self.metric_type is not None: + result['MetricType'] = self.metric_type + if self.node_gpumetric is not None: + result['NodeGPUMetric'] = self.node_gpumetric.to_map() if self.request_id is not None: result['requestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Summary') is not None: - temp_model = UserViewMetric() - self.summary = temp_model.from_map(m['Summary']) - self.user_metrics = [] - if m.get('UserMetrics') is not None: - for k in m.get('UserMetrics'): - temp_model = UserViewMetric() - self.user_metrics.append(temp_model.from_map(k)) + if m.get('MetricType') is not None: + self.metric_type = m.get('MetricType') + if m.get('NodeGPUMetric') is not None: + temp_model = NodeGPUMetric() + self.node_gpumetric = temp_model.from_map(m['NodeGPUMetric']) if m.get('requestId') is not None: self.request_id = m.get('requestId') return self -class GetRangeUserViewMetricsResponse(TeaModel): +class GetNodeGPUMetricsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetRangeUserViewMetricsResponseBody = None, + body: GetNodeGPUMetricsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - 
self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -8856,17 +9087,25 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetRangeUserViewMetricsResponseBody() + temp_model = GetNodeGPUMetricsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetResourceGroupRequest(TeaModel): +class GetNodeMetricsRequest(TeaModel): def __init__( self, - is_aiworkspace_data_enabled: bool = None, + end_time: str = None, + gputype: str = None, + start_time: str = None, + time_step: str = None, + verbose: bool = None, ): - self.is_aiworkspace_data_enabled = is_aiworkspace_data_enabled + self.end_time = end_time + self.gputype = gputype + self.start_time = start_time + self.time_step = time_step + self.verbose = verbose def validate(self): pass @@ -8877,49 +9116,49 @@ def to_map(self): return _map result = dict() - if self.is_aiworkspace_data_enabled is not None: - result['IsAIWorkspaceDataEnabled'] = self.is_aiworkspace_data_enabled + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.start_time is not None: + result['StartTime'] = self.start_time + if self.time_step is not None: + result['TimeStep'] = self.time_step + if self.verbose is not None: + result['Verbose'] = self.verbose return result def from_map(self, m: dict = None): m = m or dict() - if m.get('IsAIWorkspaceDataEnabled') is not None: - self.is_aiworkspace_data_enabled = m.get('IsAIWorkspaceDataEnabled') - return self - - -class GetResourceGroupResponseBody(TeaModel): + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('StartTime') is not None: + self.start_time = 
m.get('StartTime') + if m.get('TimeStep') is not None: + self.time_step = m.get('TimeStep') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') + return self + + +class GetNodeMetricsResponseBody(TeaModel): def __init__( self, - cluster_id: str = None, - computing_resource_provider: str = None, - creator_id: str = None, - gmt_created_time: str = None, - gmt_modified_time: str = None, - name: str = None, - request_id: str = None, - resource_type: str = None, - status: str = None, - support_rdma: bool = None, - user_vpc: UserVpc = None, - workspace_id: str = None, + metric_type: str = None, + nodes_metrics: List[NodeMetric] = None, + resource_group_id: str = None, ): - self.cluster_id = cluster_id - self.computing_resource_provider = computing_resource_provider - self.creator_id = creator_id - self.gmt_created_time = gmt_created_time - self.gmt_modified_time = gmt_modified_time - self.name = name - self.request_id = request_id - self.resource_type = resource_type - self.status = status - self.support_rdma = support_rdma - self.user_vpc = user_vpc - self.workspace_id = workspace_id + self.metric_type = metric_type + self.nodes_metrics = nodes_metrics + self.resource_group_id = resource_group_id def validate(self): - if self.user_vpc: - self.user_vpc.validate() + if self.nodes_metrics: + for k in self.nodes_metrics: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -8927,77 +9166,42 @@ def to_map(self): return _map result = dict() - if self.cluster_id is not None: - result['ClusterID'] = self.cluster_id - if self.computing_resource_provider is not None: - result['ComputingResourceProvider'] = self.computing_resource_provider - if self.creator_id is not None: - result['CreatorID'] = self.creator_id - if self.gmt_created_time is not None: - result['GmtCreatedTime'] = self.gmt_created_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.name is not None: - result['Name'] = self.name 
- if self.request_id is not None: - result['RequestId'] = self.request_id - if self.resource_type is not None: - result['ResourceType'] = self.resource_type - if self.status is not None: - result['Status'] = self.status - if self.support_rdma is not None: - result['SupportRDMA'] = self.support_rdma - if self.user_vpc is not None: - result['UserVpc'] = self.user_vpc.to_map() - if self.workspace_id is not None: - result['WorkspaceID'] = self.workspace_id + if self.metric_type is not None: + result['MetricType'] = self.metric_type + result['NodesMetrics'] = [] + if self.nodes_metrics is not None: + for k in self.nodes_metrics: + result['NodesMetrics'].append(k.to_map() if k else None) + if self.resource_group_id is not None: + result['ResourceGroupID'] = self.resource_group_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ClusterID') is not None: - self.cluster_id = m.get('ClusterID') - if m.get('ComputingResourceProvider') is not None: - self.computing_resource_provider = m.get('ComputingResourceProvider') - if m.get('CreatorID') is not None: - self.creator_id = m.get('CreatorID') - if m.get('GmtCreatedTime') is not None: - self.gmt_created_time = m.get('GmtCreatedTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('ResourceType') is not None: - self.resource_type = m.get('ResourceType') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('SupportRDMA') is not None: - self.support_rdma = m.get('SupportRDMA') - if m.get('UserVpc') is not None: - temp_model = UserVpc() - self.user_vpc = temp_model.from_map(m['UserVpc']) - if m.get('WorkspaceID') is not None: - self.workspace_id = m.get('WorkspaceID') + if m.get('MetricType') is not None: + self.metric_type = m.get('MetricType') + self.nodes_metrics = [] + if 
m.get('NodesMetrics') is not None: + for k in m.get('NodesMetrics'): + temp_model = NodeMetric() + self.nodes_metrics.append(temp_model.from_map(k)) + if m.get('ResourceGroupID') is not None: + self.resource_group_id = m.get('ResourceGroupID') return self -class GetResourceGroupResponse(TeaModel): +class GetNodeMetricsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetResourceGroupResponseBody = None, + body: GetNodeMetricsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -9022,53 +9226,27 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetResourceGroupResponseBody() + temp_model = GetNodeMetricsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetResourceGroupMachineGroupResponseBody(TeaModel): +class GetNodeViewMetricsRequest(TeaModel): def __init__( self, - cpu: str = None, - default_driver: str = None, - ecs_count: int = None, - ecs_spec: str = None, - gmt_created_time: str = None, - gmt_expired_time: str = None, - gmt_modified_time: str = None, - gmt_started_time: str = None, - gpu: str = None, - gpu_type: str = None, - machine_group_id: str = None, - memory: str = None, - payment_duration: str = None, - payment_duration_unit: str = None, - payment_type: str = None, - request_id: str = None, - resource_group_id: str = None, - status: str = None, - supported_drivers: List[str] = None, + node_id: str = None, + page_number: int = None, + page_size: int = None, + time_step: str = None, + workspace_id: str = None, ): - self.cpu = cpu - self.default_driver = default_driver - self.ecs_count = ecs_count - self.ecs_spec = 
ecs_spec - self.gmt_created_time = gmt_created_time - self.gmt_expired_time = gmt_expired_time - self.gmt_modified_time = gmt_modified_time - self.gmt_started_time = gmt_started_time - self.gpu = gpu - self.gpu_type = gpu_type - self.machine_group_id = machine_group_id - self.memory = memory - self.payment_duration = payment_duration - self.payment_duration_unit = payment_duration_unit - self.payment_type = payment_type - self.request_id = request_id - self.resource_group_id = resource_group_id - self.status = status - self.supported_drivers = supported_drivers + self.node_id = node_id + # This parameter is required. + self.page_number = page_number + # This parameter is required. + self.page_size = page_size + self.time_step = time_step + self.workspace_id = workspace_id def validate(self): pass @@ -9079,106 +9257,47 @@ def to_map(self): return _map result = dict() - if self.cpu is not None: - result['Cpu'] = self.cpu - if self.default_driver is not None: - result['DefaultDriver'] = self.default_driver - if self.ecs_count is not None: - result['EcsCount'] = self.ecs_count - if self.ecs_spec is not None: - result['EcsSpec'] = self.ecs_spec - if self.gmt_created_time is not None: - result['GmtCreatedTime'] = self.gmt_created_time - if self.gmt_expired_time is not None: - result['GmtExpiredTime'] = self.gmt_expired_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.gmt_started_time is not None: - result['GmtStartedTime'] = self.gmt_started_time - if self.gpu is not None: - result['Gpu'] = self.gpu - if self.gpu_type is not None: - result['GpuType'] = self.gpu_type - if self.machine_group_id is not None: - result['MachineGroupID'] = self.machine_group_id - if self.memory is not None: - result['Memory'] = self.memory - if self.payment_duration is not None: - result['PaymentDuration'] = self.payment_duration - if self.payment_duration_unit is not None: - result['PaymentDurationUnit'] = self.payment_duration_unit 
- if self.payment_type is not None: - result['PaymentType'] = self.payment_type - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.resource_group_id is not None: - result['ResourceGroupID'] = self.resource_group_id - if self.status is not None: - result['Status'] = self.status - if self.supported_drivers is not None: - result['SupportedDrivers'] = self.supported_drivers + if self.node_id is not None: + result['NodeId'] = self.node_id + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.time_step is not None: + result['TimeStep'] = self.time_step + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Cpu') is not None: - self.cpu = m.get('Cpu') - if m.get('DefaultDriver') is not None: - self.default_driver = m.get('DefaultDriver') - if m.get('EcsCount') is not None: - self.ecs_count = m.get('EcsCount') - if m.get('EcsSpec') is not None: - self.ecs_spec = m.get('EcsSpec') - if m.get('GmtCreatedTime') is not None: - self.gmt_created_time = m.get('GmtCreatedTime') - if m.get('GmtExpiredTime') is not None: - self.gmt_expired_time = m.get('GmtExpiredTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('GmtStartedTime') is not None: - self.gmt_started_time = m.get('GmtStartedTime') - if m.get('Gpu') is not None: - self.gpu = m.get('Gpu') - if m.get('GpuType') is not None: - self.gpu_type = m.get('GpuType') - if m.get('MachineGroupID') is not None: - self.machine_group_id = m.get('MachineGroupID') - if m.get('Memory') is not None: - self.memory = m.get('Memory') - if m.get('PaymentDuration') is not None: - self.payment_duration = m.get('PaymentDuration') - if m.get('PaymentDurationUnit') is not None: - self.payment_duration_unit = m.get('PaymentDurationUnit') - if 
m.get('PaymentType') is not None: - self.payment_type = m.get('PaymentType') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('ResourceGroupID') is not None: - self.resource_group_id = m.get('ResourceGroupID') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('SupportedDrivers') is not None: - self.supported_drivers = m.get('SupportedDrivers') + if m.get('NodeId') is not None: + self.node_id = m.get('NodeId') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('TimeStep') is not None: + self.time_step = m.get('TimeStep') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetResourceGroupMachineGroupResponse(TeaModel): +class GetNodeViewMetricsResponseBody(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: GetResourceGroupMachineGroupResponseBody = None, + node_metrics: List[NodeViewMetric] = None, + total: int = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.node_metrics = node_metrics + self.total = total def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + if self.node_metrics: + for k in self.node_metrics: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -9186,41 +9305,40 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + result['NodeMetrics'] = [] + if self.node_metrics is not None: + for k in self.node_metrics: + result['NodeMetrics'].append(k.to_map() if k else 
None) + if self.total is not None: + result['Total'] = self.total return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = GetResourceGroupMachineGroupResponseBody() - self.body = temp_model.from_map(m['body']) + self.node_metrics = [] + if m.get('NodeMetrics') is not None: + for k in m.get('NodeMetrics'): + temp_model = NodeViewMetric() + self.node_metrics.append(temp_model.from_map(k)) + if m.get('Total') is not None: + self.total = m.get('Total') return self -class GetResourceGroupMetricsRequest(TeaModel): +class GetNodeViewMetricsResponse(TeaModel): def __init__( self, - end_time: str = None, - gputype: str = None, - start_time: str = None, - time_step: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetNodeViewMetricsResponseBody = None, ): - self.end_time = end_time - self.gputype = gputype - self.start_time = start_time - self.time_step = time_step + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - pass + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -9228,45 +9346,63 @@ def to_map(self): return _map result = dict() - if self.end_time is not None: - result['EndTime'] = self.end_time - if self.gputype is not None: - result['GPUType'] = self.gputype - if self.start_time is not None: - result['StartTime'] = self.start_time - if self.time_step is not None: - result['TimeStep'] = self.time_step + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('EndTime') is not None: - self.end_time = m.get('EndTime') - if 
m.get('GPUType') is not None: - self.gputype = m.get('GPUType') - if m.get('StartTime') is not None: - self.start_time = m.get('StartTime') - if m.get('TimeStep') is not None: - self.time_step = m.get('TimeStep') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetNodeViewMetricsResponseBody() + self.body = temp_model.from_map(m['body']) return self -class GetResourceGroupMetricsResponseBody(TeaModel): +class GetOperationResponseBody(TeaModel): def __init__( self, + creator_id: str = None, + gmt_created_time: str = None, + gmt_end_time: str = None, + gmt_modified_time: str = None, + gmt_start_time: str = None, + object_id: str = None, + object_type: str = None, + operation_description: str = None, + operation_id: str = None, + operation_spec_json: str = None, + operation_type: str = None, + reason_code: str = None, + reason_message: str = None, request_id: str = None, - resource_group_id: str = None, - resource_group_metrics: List[ResourceGroupMetric] = None, + status: str = None, ): + self.creator_id = creator_id + self.gmt_created_time = gmt_created_time + self.gmt_end_time = gmt_end_time + self.gmt_modified_time = gmt_modified_time + self.gmt_start_time = gmt_start_time + self.object_id = object_id + self.object_type = object_type + self.operation_description = operation_description + self.operation_id = operation_id + self.operation_spec_json = operation_spec_json + self.operation_type = operation_type + self.reason_code = reason_code + self.reason_message = reason_message self.request_id = request_id - self.resource_group_id = resource_group_id - self.resource_group_metrics = resource_group_metrics + self.status = status def validate(self): - if self.resource_group_metrics: - for k in self.resource_group_metrics: - if k: - k.validate() + pass def to_map(self): _map = super().to_map() @@ -9274,45 +9410,85 @@ def 
to_map(self): return _map result = dict() + if self.creator_id is not None: + result['CreatorId'] = self.creator_id + if self.gmt_created_time is not None: + result['GmtCreatedTime'] = self.gmt_created_time + if self.gmt_end_time is not None: + result['GmtEndTime'] = self.gmt_end_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.gmt_start_time is not None: + result['GmtStartTime'] = self.gmt_start_time + if self.object_id is not None: + result['ObjectId'] = self.object_id + if self.object_type is not None: + result['ObjectType'] = self.object_type + if self.operation_description is not None: + result['OperationDescription'] = self.operation_description + if self.operation_id is not None: + result['OperationId'] = self.operation_id + if self.operation_spec_json is not None: + result['OperationSpecJson'] = self.operation_spec_json + if self.operation_type is not None: + result['OperationType'] = self.operation_type + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message if self.request_id is not None: result['RequestId'] = self.request_id - if self.resource_group_id is not None: - result['ResourceGroupID'] = self.resource_group_id - result['ResourceGroupMetrics'] = [] - if self.resource_group_metrics is not None: - for k in self.resource_group_metrics: - result['ResourceGroupMetrics'].append(k.to_map() if k else None) + if self.status is not None: + result['Status'] = self.status return result def from_map(self, m: dict = None): m = m or dict() + if m.get('CreatorId') is not None: + self.creator_id = m.get('CreatorId') + if m.get('GmtCreatedTime') is not None: + self.gmt_created_time = m.get('GmtCreatedTime') + if m.get('GmtEndTime') is not None: + self.gmt_end_time = m.get('GmtEndTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if 
m.get('GmtStartTime') is not None: + self.gmt_start_time = m.get('GmtStartTime') + if m.get('ObjectId') is not None: + self.object_id = m.get('ObjectId') + if m.get('ObjectType') is not None: + self.object_type = m.get('ObjectType') + if m.get('OperationDescription') is not None: + self.operation_description = m.get('OperationDescription') + if m.get('OperationId') is not None: + self.operation_id = m.get('OperationId') + if m.get('OperationSpecJson') is not None: + self.operation_spec_json = m.get('OperationSpecJson') + if m.get('OperationType') is not None: + self.operation_type = m.get('OperationType') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('ResourceGroupID') is not None: - self.resource_group_id = m.get('ResourceGroupID') - self.resource_group_metrics = [] - if m.get('ResourceGroupMetrics') is not None: - for k in m.get('ResourceGroupMetrics'): - temp_model = ResourceGroupMetric() - self.resource_group_metrics.append(temp_model.from_map(k)) + if m.get('Status') is not None: + self.status = m.get('Status') return self -class GetResourceGroupMetricsResponse(TeaModel): +class GetOperationResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetResourceGroupMetricsResponseBody = None, + body: GetOperationResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -9337,19 +9513,31 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = 
GetResourceGroupMetricsResponseBody() + temp_model = GetOperationResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetResourceGroupRequestRequest(TeaModel): +class GetQueueInfosRequest(TeaModel): def __init__( self, - pod_status: str = None, - resource_group_id: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + quota_ids: str = None, + sort_by: str = None, + workload_ids: str = None, + workload_type: str = None, + workspace_ids: str = None, ): - self.pod_status = pod_status - self.resource_group_id = resource_group_id + self.order = order + self.page_number = page_number + self.page_size = page_size + self.quota_ids = quota_ids + self.sort_by = sort_by + self.workload_ids = workload_ids + self.workload_type = workload_type + self.workspace_ids = workspace_ids def validate(self): pass @@ -9360,37 +9548,59 @@ def to_map(self): return _map result = dict() - if self.pod_status is not None: - result['PodStatus'] = self.pod_status - if self.resource_group_id is not None: - result['ResourceGroupID'] = self.resource_group_id + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.quota_ids is not None: + result['QuotaIds'] = self.quota_ids + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.workload_ids is not None: + result['WorkloadIds'] = self.workload_ids + if self.workload_type is not None: + result['WorkloadType'] = self.workload_type + if self.workspace_ids is not None: + result['WorkspaceIds'] = self.workspace_ids return result def from_map(self, m: dict = None): m = m or dict() - if m.get('PodStatus') is not None: - self.pod_status = m.get('PodStatus') - if m.get('ResourceGroupID') is not None: - self.resource_group_id = m.get('ResourceGroupID') + if m.get('Order') is not None: + self.order = m.get('Order') + if 
m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('QuotaIds') is not None: + self.quota_ids = m.get('QuotaIds') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('WorkloadIds') is not None: + self.workload_ids = m.get('WorkloadIds') + if m.get('WorkloadType') is not None: + self.workload_type = m.get('WorkloadType') + if m.get('WorkspaceIds') is not None: + self.workspace_ids = m.get('WorkspaceIds') return self -class GetResourceGroupRequestResponseBody(TeaModel): +class GetQueueInfosResponseBody(TeaModel): def __init__( self, - request_cpu: int = None, - request_gpu: int = None, - request_gpuinfos: List[GPUInfo] = None, - request_memory: int = None, + queue_infos: List[QueueInfo] = None, + request_id: str = None, + total_count: int = None, ): - self.request_cpu = request_cpu - self.request_gpu = request_gpu - self.request_gpuinfos = request_gpuinfos - self.request_memory = request_memory + self.queue_infos = queue_infos + self.request_id = request_id + self.total_count = total_count def validate(self): - if self.request_gpuinfos: - for k in self.request_gpuinfos: + if self.queue_infos: + for k in self.queue_infos: if k: k.validate() @@ -9400,49 +9610,42 @@ def to_map(self): return _map result = dict() - if self.request_cpu is not None: - result['requestCPU'] = self.request_cpu - if self.request_gpu is not None: - result['requestGPU'] = self.request_gpu - result['requestGPUInfos'] = [] - if self.request_gpuinfos is not None: - for k in self.request_gpuinfos: - result['requestGPUInfos'].append(k.to_map() if k else None) - if self.request_memory is not None: - result['requestMemory'] = self.request_memory + result['QueueInfos'] = [] + if self.queue_infos is not None: + for k in self.queue_infos: + result['QueueInfos'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id + if 
self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('requestCPU') is not None: - self.request_cpu = m.get('requestCPU') - if m.get('requestGPU') is not None: - self.request_gpu = m.get('requestGPU') - self.request_gpuinfos = [] - if m.get('requestGPUInfos') is not None: - for k in m.get('requestGPUInfos'): - temp_model = GPUInfo() - self.request_gpuinfos.append(temp_model.from_map(k)) - if m.get('requestMemory') is not None: - self.request_memory = m.get('requestMemory') + self.queue_infos = [] + if m.get('QueueInfos') is not None: + for k in m.get('QueueInfos'): + temp_model = QueueInfo() + self.queue_infos.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetResourceGroupRequestResponse(TeaModel): +class GetQueueInfosResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetResourceGroupRequestResponseBody = None, + body: GetQueueInfosResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -9467,17 +9670,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetResourceGroupRequestResponseBody() + temp_model = GetQueueInfosResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetResourceGroupTotalRequest(TeaModel): +class GetQuotaRequest(TeaModel): def __init__( self, - resource_group_id: str = None, + verbose: bool = None, ): - self.resource_group_id = resource_group_id + self.verbose = verbose 
def validate(self): pass @@ -9488,33 +9691,84 @@ def to_map(self): return _map result = dict() - if self.resource_group_id is not None: - result['ResourceGroupID'] = self.resource_group_id + if self.verbose is not None: + result['Verbose'] = self.verbose return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ResourceGroupID') is not None: - self.resource_group_id = m.get('ResourceGroupID') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') return self -class GetResourceGroupTotalResponseBody(TeaModel): +class GetQuotaResponseBody(TeaModel): def __init__( self, - total_cpu: int = None, - total_gpu: int = None, - total_gpuinfos: List[GPUInfo] = None, - total_memory: int = None, + allocate_strategy: str = None, + creator_id: str = None, + description: str = None, + gmt_created_time: str = None, + gmt_modified_time: str = None, + labels: List[Label] = None, + latest_operation_id: str = None, + min: ResourceSpec = None, + parent_quota_id: str = None, + queue_strategy: str = None, + quota_config: QuotaConfig = None, + quota_details: QuotaDetails = None, + quota_id: str = None, + quota_name: str = None, + reason_code: str = None, + reason_message: str = None, + request_id: str = None, + resource_group_ids: List[str] = None, + resource_type: str = None, + status: str = None, + sub_quotas: List[QuotaIdName] = None, + workspaces: List[WorkspaceIdName] = None, ): - self.total_cpu = total_cpu - self.total_gpu = total_gpu - self.total_gpuinfos = total_gpuinfos - self.total_memory = total_memory + self.allocate_strategy = allocate_strategy + self.creator_id = creator_id + self.description = description + self.gmt_created_time = gmt_created_time + self.gmt_modified_time = gmt_modified_time + self.labels = labels + self.latest_operation_id = latest_operation_id + self.min = min + self.parent_quota_id = parent_quota_id + self.queue_strategy = queue_strategy + self.quota_config = quota_config + self.quota_details = quota_details + # Quota Id 
+ self.quota_id = quota_id + self.quota_name = quota_name + self.reason_code = reason_code + self.reason_message = reason_message + self.request_id = request_id + self.resource_group_ids = resource_group_ids + self.resource_type = resource_type + self.status = status + self.sub_quotas = sub_quotas + self.workspaces = workspaces def validate(self): - if self.total_gpuinfos: - for k in self.total_gpuinfos: + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.min: + self.min.validate() + if self.quota_config: + self.quota_config.validate() + if self.quota_details: + self.quota_details.validate() + if self.sub_quotas: + for k in self.sub_quotas: + if k: + k.validate() + if self.workspaces: + for k in self.workspaces: if k: k.validate() @@ -9524,126 +9778,131 @@ def to_map(self): return _map result = dict() - if self.total_cpu is not None: - result['totalCPU'] = self.total_cpu - if self.total_gpu is not None: - result['totalGPU'] = self.total_gpu - result['totalGPUInfos'] = [] - if self.total_gpuinfos is not None: - for k in self.total_gpuinfos: - result['totalGPUInfos'].append(k.to_map() if k else None) - if self.total_memory is not None: - result['totalMemory'] = self.total_memory - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('totalCPU') is not None: - self.total_cpu = m.get('totalCPU') - if m.get('totalGPU') is not None: - self.total_gpu = m.get('totalGPU') - self.total_gpuinfos = [] - if m.get('totalGPUInfos') is not None: - for k in m.get('totalGPUInfos'): - temp_model = GPUInfo() - self.total_gpuinfos.append(temp_model.from_map(k)) - if m.get('totalMemory') is not None: - self.total_memory = m.get('totalMemory') - return self - - -class GetResourceGroupTotalResponse(TeaModel): - def __init__( - self, - headers: Dict[str, str] = None, - status_code: int = None, - body: GetResourceGroupTotalResponseBody = None, - ): - self.headers = headers - self.status_code = status_code - self.body = body - - def 
validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = GetResourceGroupTotalResponseBody() - self.body = temp_model.from_map(m['body']) - return self - - -class GetServiceIdentityRoleResponseBody(TeaModel): - def __init__( - self, - request_id: str = None, - role_name: str = None, - ): - self.request_id = request_id - self.role_name = role_name - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.role_name is not None: - result['RoleName'] = self.role_name + if self.allocate_strategy is not None: + result['AllocateStrategy'] = self.allocate_strategy + if self.creator_id is not None: + result['CreatorId'] = self.creator_id + if self.description is not None: + result['Description'] = self.description + if self.gmt_created_time is not None: + result['GmtCreatedTime'] = self.gmt_created_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.latest_operation_id is not None: + result['LatestOperationId'] 
= self.latest_operation_id + if self.min is not None: + result['Min'] = self.min.to_map() + if self.parent_quota_id is not None: + result['ParentQuotaId'] = self.parent_quota_id + if self.queue_strategy is not None: + result['QueueStrategy'] = self.queue_strategy + if self.quota_config is not None: + result['QuotaConfig'] = self.quota_config.to_map() + if self.quota_details is not None: + result['QuotaDetails'] = self.quota_details.to_map() + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.quota_name is not None: + result['QuotaName'] = self.quota_name + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.resource_group_ids is not None: + result['ResourceGroupIds'] = self.resource_group_ids + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + if self.status is not None: + result['Status'] = self.status + result['SubQuotas'] = [] + if self.sub_quotas is not None: + for k in self.sub_quotas: + result['SubQuotas'].append(k.to_map() if k else None) + result['Workspaces'] = [] + if self.workspaces is not None: + for k in self.workspaces: + result['Workspaces'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() + if m.get('AllocateStrategy') is not None: + self.allocate_strategy = m.get('AllocateStrategy') + if m.get('CreatorId') is not None: + self.creator_id = m.get('CreatorId') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('GmtCreatedTime') is not None: + self.gmt_created_time = m.get('GmtCreatedTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + 
self.labels.append(temp_model.from_map(k)) + if m.get('LatestOperationId') is not None: + self.latest_operation_id = m.get('LatestOperationId') + if m.get('Min') is not None: + temp_model = ResourceSpec() + self.min = temp_model.from_map(m['Min']) + if m.get('ParentQuotaId') is not None: + self.parent_quota_id = m.get('ParentQuotaId') + if m.get('QueueStrategy') is not None: + self.queue_strategy = m.get('QueueStrategy') + if m.get('QuotaConfig') is not None: + temp_model = QuotaConfig() + self.quota_config = temp_model.from_map(m['QuotaConfig']) + if m.get('QuotaDetails') is not None: + temp_model = QuotaDetails() + self.quota_details = temp_model.from_map(m['QuotaDetails']) + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('QuotaName') is not None: + self.quota_name = m.get('QuotaName') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('RoleName') is not None: - self.role_name = m.get('RoleName') + if m.get('ResourceGroupIds') is not None: + self.resource_group_ids = m.get('ResourceGroupIds') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + if m.get('Status') is not None: + self.status = m.get('Status') + self.sub_quotas = [] + if m.get('SubQuotas') is not None: + for k in m.get('SubQuotas'): + temp_model = QuotaIdName() + self.sub_quotas.append(temp_model.from_map(k)) + self.workspaces = [] + if m.get('Workspaces') is not None: + for k in m.get('Workspaces'): + temp_model = WorkspaceIdName() + self.workspaces.append(temp_model.from_map(k)) return self -class GetServiceIdentityRoleResponse(TeaModel): +class GetQuotaResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetServiceIdentityRoleResponseBody = None, + body: GetQuotaResponseBody = None, ): 
self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -9668,19 +9927,31 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetServiceIdentityRoleResponseBody() + temp_model = GetQuotaResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetTokenRequest(TeaModel): +class GetQuotaJobViewMetricsRequest(TeaModel): def __init__( self, - expire_time: int = None, - training_job_id: str = None, + end_time: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + start_time: str = None, + time_step: str = None, + workspace_id: str = None, ): - self.expire_time = expire_time - self.training_job_id = training_job_id + self.end_time = end_time + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + self.start_time = start_time + self.time_step = time_step + self.workspace_id = workspace_id def validate(self): pass @@ -9691,32 +9962,67 @@ def to_map(self): return _map result = dict() - if self.expire_time is not None: - result['ExpireTime'] = self.expire_time - if self.training_job_id is not None: - result['TrainingJobId'] = self.training_job_id + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.start_time is not None: + result['StartTime'] = self.start_time + if self.time_step is not None: + result['TimeStep'] = self.time_step 
+ if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ExpireTime') is not None: - self.expire_time = m.get('ExpireTime') - if m.get('TrainingJobId') is not None: - self.training_job_id = m.get('TrainingJobId') + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') + if m.get('TimeStep') is not None: + self.time_step = m.get('TimeStep') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetTokenResponseBody(TeaModel): +class GetQuotaJobViewMetricsResponseBody(TeaModel): def __init__( self, + job_metrics: List[QuotaJobViewMetric] = None, + quota_id: str = None, request_id: str = None, - token: str = None, + summary: QuotaJobViewMetric = None, + total_count: int = None, ): + self.job_metrics = job_metrics + self.quota_id = quota_id self.request_id = request_id - self.token = token + self.summary = summary + self.total_count = total_count def validate(self): - pass + if self.job_metrics: + for k in self.job_metrics: + if k: + k.validate() + if self.summary: + self.summary.validate() def to_map(self): _map = super().to_map() @@ -9724,36 +10030,51 @@ def to_map(self): return _map result = dict() + result['JobMetrics'] = [] + if self.job_metrics is not None: + for k in self.job_metrics: + result['JobMetrics'].append(k.to_map() if k else None) + if self.quota_id is not None: + result['QuotaId'] = self.quota_id if self.request_id is not None: result['RequestId'] = self.request_id - if self.token is not None: - result['Token'] = self.token + if self.summary 
is not None: + result['Summary'] = self.summary.to_map() + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() + self.job_metrics = [] + if m.get('JobMetrics') is not None: + for k in m.get('JobMetrics'): + temp_model = QuotaJobViewMetric() + self.job_metrics.append(temp_model.from_map(k)) + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('Token') is not None: - self.token = m.get('Token') + if m.get('Summary') is not None: + temp_model = QuotaJobViewMetric() + self.summary = temp_model.from_map(m['Summary']) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetTokenResponse(TeaModel): +class GetQuotaJobViewMetricsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetTokenResponseBody = None, + body: GetQuotaJobViewMetricsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -9778,17 +10099,23 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetTokenResponseBody() + temp_model = GetQuotaJobViewMetricsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetTrainingJobRequest(TeaModel): +class GetQuotaMetricsRequest(TeaModel): def __init__( self, - token: str = None, + end_time: str = None, + gputype: str = None, + start_time: str = None, + time_step: str = None, ): - self.token = token + self.end_time = end_time + self.gputype = gputype + self.start_time = start_time + self.time_step = time_step def 
validate(self): pass @@ -9799,28 +10126,45 @@ def to_map(self): return _map result = dict() - if self.token is not None: - result['Token'] = self.token + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.start_time is not None: + result['StartTime'] = self.start_time + if self.time_step is not None: + result['TimeStep'] = self.time_step return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Token') is not None: - self.token = m.get('Token') + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') + if m.get('TimeStep') is not None: + self.time_step = m.get('TimeStep') return self -class GetTrainingJobResponseBodyComputeResource(TeaModel): +class GetQuotaMetricsResponseBody(TeaModel): def __init__( self, - ecs_count: int = None, - ecs_spec: str = None, + quota_id: str = None, + quota_metrics: List[QuotaMetric] = None, + request_id: str = None, ): - self.ecs_count = ecs_count - self.ecs_spec = ecs_spec + self.quota_id = quota_id + self.quota_metrics = quota_metrics + self.request_id = request_id def validate(self): - pass + if self.quota_metrics: + for k in self.quota_metrics: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -9828,67 +10172,44 @@ def to_map(self): return _map result = dict() - if self.ecs_count is not None: - result['EcsCount'] = self.ecs_count - if self.ecs_spec is not None: - result['EcsSpec'] = self.ecs_spec - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('EcsCount') is not None: - self.ecs_count = m.get('EcsCount') - if m.get('EcsSpec') is not None: - self.ecs_spec = m.get('EcsSpec') - return self - - -class GetTrainingJobResponseBodyHyperParameters(TeaModel): - def __init__( - self, - name: str = None, - value: str = None, 
- ): - self.name = name - self.value = value - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.name is not None: - result['Name'] = self.name - if self.value is not None: - result['Value'] = self.value + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + result['QuotaMetrics'] = [] + if self.quota_metrics is not None: + for k in self.quota_metrics: + result['QuotaMetrics'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + self.quota_metrics = [] + if m.get('QuotaMetrics') is not None: + for k in m.get('QuotaMetrics'): + temp_model = QuotaMetric() + self.quota_metrics.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') return self -class GetTrainingJobResponseBodyInputChannels(TeaModel): +class GetQuotaMetricsResponse(TeaModel): def __init__( self, - dataset_id: str = None, - input_uri: str = None, - name: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetQuotaMetricsResponseBody = None, ): - self.dataset_id = dataset_id - self.input_uri = input_uri - self.name = name + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - pass + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -9896,35 +10217,40 @@ def to_map(self): return _map result = dict() - if self.dataset_id is not None: - result['DatasetId'] = self.dataset_id - if self.input_uri is not None: - result['InputUri'] = self.input_uri - if self.name is not None: - result['Name'] = self.name + if self.headers is not None: + 
result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DatasetId') is not None: - self.dataset_id = m.get('DatasetId') - if m.get('InputUri') is not None: - self.input_uri = m.get('InputUri') - if m.get('Name') is not None: - self.name = m.get('Name') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetQuotaMetricsResponseBody() + self.body = temp_model.from_map(m['body']) return self -class GetTrainingJobResponseBodyInstances(TeaModel): +class GetQuotaNodeMetricsRequest(TeaModel): def __init__( self, - name: str = None, - role: str = None, - status: str = None, + end_time: str = None, + gputype: str = None, + start_time: str = None, + time_step: str = None, + verbose: bool = None, ): - self.name = name - self.role = role - self.status = status + self.end_time = end_time + self.gputype = gputype + self.start_time = start_time + self.time_step = time_step + self.verbose = verbose def validate(self): pass @@ -9935,36 +10261,51 @@ def to_map(self): return _map result = dict() - if self.name is not None: - result['Name'] = self.name - if self.role is not None: - result['Role'] = self.role - if self.status is not None: - result['Status'] = self.status + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.start_time is not None: + result['StartTime'] = self.start_time + if self.time_step is not None: + result['TimeStep'] = self.time_step + if self.verbose is not None: + result['Verbose'] = self.verbose return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Role') 
is not None: - self.role = m.get('Role') - if m.get('Status') is not None: - self.status = m.get('Status') + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') + if m.get('TimeStep') is not None: + self.time_step = m.get('TimeStep') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') return self -class GetTrainingJobResponseBodyLabels(TeaModel): +class GetQuotaNodeMetricsResponseBody(TeaModel): def __init__( self, - key: str = None, - value: str = None, + metric_type: str = None, + nodes_metrics: List[NodeMetric] = None, + quota_id: str = None, + request_id: str = None, ): - self.key = key - self.value = value + self.metric_type = metric_type + self.nodes_metrics = nodes_metrics + self.quota_id = quota_id + self.request_id = request_id def validate(self): - pass + if self.nodes_metrics: + for k in self.nodes_metrics: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -9972,34 +10313,48 @@ def to_map(self): return _map result = dict() - if self.key is not None: - result['Key'] = self.key - if self.value is not None: - result['Value'] = self.value + if self.metric_type is not None: + result['MetricType'] = self.metric_type + result['NodesMetrics'] = [] + if self.nodes_metrics is not None: + for k in self.nodes_metrics: + result['NodesMetrics'].append(k.to_map() if k else None) + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Key') is not None: - self.key = m.get('Key') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('MetricType') is not None: + self.metric_type = m.get('MetricType') + self.nodes_metrics = [] + if m.get('NodesMetrics') is not None: + for k in m.get('NodesMetrics'): 
+ temp_model = NodeMetric() + self.nodes_metrics.append(temp_model.from_map(k)) + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') return self -class GetTrainingJobResponseBodyLatestMetrics(TeaModel): +class GetQuotaNodeMetricsResponse(TeaModel): def __init__( self, - name: str = None, - timestamp: str = None, - value: float = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetQuotaNodeMetricsResponseBody = None, ): - self.name = name - self.timestamp = timestamp - self.value = value + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - pass + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -10007,33 +10362,52 @@ def to_map(self): return _map result = dict() - if self.name is not None: - result['Name'] = self.name - if self.timestamp is not None: - result['Timestamp'] = self.timestamp - if self.value is not None: - result['Value'] = self.value + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Timestamp') is not None: - self.timestamp = m.get('Timestamp') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetQuotaNodeMetricsResponseBody() + self.body = temp_model.from_map(m['body']) return self -class GetTrainingJobResponseBodyLatestProgressOverallProgress(TeaModel): +class GetQuotaNodeViewMetricsRequest(TeaModel): def __init__( self, - timestamp: str = None, - value: float 
= None, + node_id: str = None, + node_status: str = None, + order: str = None, + order_status: str = None, + page_number: int = None, + page_size: int = None, + resource_group_id: str = None, + self_only: bool = None, + sort_by: str = None, + time_step: str = None, + workspace_id: str = None, ): - self.timestamp = timestamp - self.value = value + self.node_id = node_id + self.node_status = node_status + self.order = order + self.order_status = order_status + self.page_number = page_number + self.page_size = page_size + self.resource_group_id = resource_group_id + self.self_only = self_only + self.sort_by = sort_by + self.time_step = time_step + self.workspace_id = workspace_id def validate(self): pass @@ -10044,32 +10418,75 @@ def to_map(self): return _map result = dict() - if self.timestamp is not None: - result['Timestamp'] = self.timestamp - if self.value is not None: - result['Value'] = self.value + if self.node_id is not None: + result['NodeId'] = self.node_id + if self.node_status is not None: + result['NodeStatus'] = self.node_status + if self.order is not None: + result['Order'] = self.order + if self.order_status is not None: + result['OrderStatus'] = self.order_status + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.resource_group_id is not None: + result['ResourceGroupId'] = self.resource_group_id + if self.self_only is not None: + result['SelfOnly'] = self.self_only + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.time_step is not None: + result['TimeStep'] = self.time_step + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Timestamp') is not None: - self.timestamp = m.get('Timestamp') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('NodeId') is not None: + self.node_id = m.get('NodeId') + 
if m.get('NodeStatus') is not None: + self.node_status = m.get('NodeStatus') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('OrderStatus') is not None: + self.order_status = m.get('OrderStatus') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('ResourceGroupId') is not None: + self.resource_group_id = m.get('ResourceGroupId') + if m.get('SelfOnly') is not None: + self.self_only = m.get('SelfOnly') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('TimeStep') is not None: + self.time_step = m.get('TimeStep') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetTrainingJobResponseBodyLatestProgressRemainingTime(TeaModel): +class GetQuotaNodeViewMetricsResponseBody(TeaModel): def __init__( self, - timestamp: str = None, - value: int = None, + node_metrics: List[QuotaNodeViewMetric] = None, + quota_id: str = None, + request_id: str = None, + total_count: int = None, ): - self.timestamp = timestamp - self.value = value + self.node_metrics = node_metrics + self.quota_id = quota_id + self.request_id = request_id + self.total_count = total_count def validate(self): - pass + if self.node_metrics: + for k in self.node_metrics: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -10077,35 +10494,48 @@ def to_map(self): return _map result = dict() - if self.timestamp is not None: - result['Timestamp'] = self.timestamp - if self.value is not None: - result['Value'] = self.value + result['NodeMetrics'] = [] + if self.node_metrics is not None: + for k in self.node_metrics: + result['NodeMetrics'].append(k.to_map() if k else None) + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count 
return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Timestamp') is not None: - self.timestamp = m.get('Timestamp') - if m.get('Value') is not None: - self.value = m.get('Value') + self.node_metrics = [] + if m.get('NodeMetrics') is not None: + for k in m.get('NodeMetrics'): + temp_model = QuotaNodeViewMetric() + self.node_metrics.append(temp_model.from_map(k)) + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetTrainingJobResponseBodyLatestProgress(TeaModel): +class GetQuotaNodeViewMetricsResponse(TeaModel): def __init__( self, - overall_progress: GetTrainingJobResponseBodyLatestProgressOverallProgress = None, - remaining_time: GetTrainingJobResponseBodyLatestProgressRemainingTime = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetQuotaNodeViewMetricsResponseBody = None, ): - self.overall_progress = overall_progress - self.remaining_time = remaining_time + self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - if self.overall_progress: - self.overall_progress.validate() - if self.remaining_time: - self.remaining_time.validate() + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -10113,33 +10543,52 @@ def to_map(self): return _map result = dict() - if self.overall_progress is not None: - result['OverallProgress'] = self.overall_progress.to_map() - if self.remaining_time is not None: - result['RemainingTime'] = self.remaining_time.to_map() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('OverallProgress') is not 
None: - temp_model = GetTrainingJobResponseBodyLatestProgressOverallProgress() - self.overall_progress = temp_model.from_map(m['OverallProgress']) - if m.get('RemainingTime') is not None: - temp_model = GetTrainingJobResponseBodyLatestProgressRemainingTime() - self.remaining_time = temp_model.from_map(m['RemainingTime']) + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetQuotaNodeViewMetricsResponseBody() + self.body = temp_model.from_map(m['body']) return self -class GetTrainingJobResponseBodyOutputChannels(TeaModel): +class GetQuotaQueueInfoRequest(TeaModel): def __init__( self, - dataset_id: str = None, - name: str = None, - output_uri: str = None, + before_workload_id: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + show_own: bool = None, + sort_by: str = None, + status: str = None, + sub_quota_ids: str = None, + workload_ids: str = None, + workload_type: str = None, + workspace_ids: str = None, ): - self.dataset_id = dataset_id - self.name = name - self.output_uri = output_uri + self.before_workload_id = before_workload_id + self.order = order + self.page_number = page_number + self.page_size = page_size + self.show_own = show_own + self.sort_by = sort_by + self.status = status + self.sub_quota_ids = sub_quota_ids + self.workload_ids = workload_ids + self.workload_type = workload_type + self.workspace_ids = workspace_ids def validate(self): pass @@ -10150,34 +10599,73 @@ def to_map(self): return _map result = dict() - if self.dataset_id is not None: - result['DatasetId'] = self.dataset_id - if self.name is not None: - result['Name'] = self.name - if self.output_uri is not None: - result['OutputUri'] = self.output_uri + if self.before_workload_id is not None: + result['BeforeWorkloadId'] = self.before_workload_id + if self.order is not None: + result['Order'] = self.order + if 
self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.show_own is not None: + result['ShowOwn'] = self.show_own + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.status is not None: + result['Status'] = self.status + if self.sub_quota_ids is not None: + result['SubQuotaIds'] = self.sub_quota_ids + if self.workload_ids is not None: + result['WorkloadIds'] = self.workload_ids + if self.workload_type is not None: + result['WorkloadType'] = self.workload_type + if self.workspace_ids is not None: + result['WorkspaceIds'] = self.workspace_ids return result def from_map(self, m: dict = None): m = m or dict() - if m.get('DatasetId') is not None: - self.dataset_id = m.get('DatasetId') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('OutputUri') is not None: - self.output_uri = m.get('OutputUri') + if m.get('BeforeWorkloadId') is not None: + self.before_workload_id = m.get('BeforeWorkloadId') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('ShowOwn') is not None: + self.show_own = m.get('ShowOwn') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('SubQuotaIds') is not None: + self.sub_quota_ids = m.get('SubQuotaIds') + if m.get('WorkloadIds') is not None: + self.workload_ids = m.get('WorkloadIds') + if m.get('WorkloadType') is not None: + self.workload_type = m.get('WorkloadType') + if m.get('WorkspaceIds') is not None: + self.workspace_ids = m.get('WorkspaceIds') return self -class GetTrainingJobResponseBodyScheduler(TeaModel): +class GetQuotaQueueInfoResponseBody(TeaModel): def __init__( self, - max_running_time_in_seconds: int = None, + queue_infos: 
List[QueueInfo] = None, + request_id: str = None, + total_count: int = None, ): - self.max_running_time_in_seconds = max_running_time_in_seconds + self.queue_infos = queue_infos + self.request_id = request_id + self.total_count = total_count def validate(self): - pass + if self.queue_infos: + for k in self.queue_infos: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -10185,34 +10673,44 @@ def to_map(self): return _map result = dict() - if self.max_running_time_in_seconds is not None: - result['MaxRunningTimeInSeconds'] = self.max_running_time_in_seconds + result['QueueInfos'] = [] + if self.queue_infos is not None: + for k in self.queue_infos: + result['QueueInfos'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - if m.get('MaxRunningTimeInSeconds') is not None: - self.max_running_time_in_seconds = m.get('MaxRunningTimeInSeconds') + self.queue_infos = [] + if m.get('QueueInfos') is not None: + for k in m.get('QueueInfos'): + temp_model = QueueInfo() + self.queue_infos.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetTrainingJobResponseBodyStatusTransitions(TeaModel): +class GetQuotaQueueInfoResponse(TeaModel): def __init__( self, - end_time: str = None, - reason_code: str = None, - reason_message: str = None, - start_time: str = None, - status: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetQuotaQueueInfoResponseBody = None, ): - self.end_time = end_time - self.reason_code = reason_code - self.reason_message = reason_message - self.start_time = start_time - self.status = status + self.headers = headers + self.status_code = status_code + self.body = body 
def validate(self): - pass + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -10220,45 +10718,46 @@ def to_map(self): return _map result = dict() - if self.end_time is not None: - result['EndTime'] = self.end_time - if self.reason_code is not None: - result['ReasonCode'] = self.reason_code - if self.reason_message is not None: - result['ReasonMessage'] = self.reason_message - if self.start_time is not None: - result['StartTime'] = self.start_time - if self.status is not None: - result['Status'] = self.status + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('EndTime') is not None: - self.end_time = m.get('EndTime') - if m.get('ReasonCode') is not None: - self.reason_code = m.get('ReasonCode') - if m.get('ReasonMessage') is not None: - self.reason_message = m.get('ReasonMessage') - if m.get('StartTime') is not None: - self.start_time = m.get('StartTime') - if m.get('Status') is not None: - self.status = m.get('Status') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetQuotaQueueInfoResponseBody() + self.body = temp_model.from_map(m['body']) return self -class GetTrainingJobResponseBodyUserVpc(TeaModel): +class GetQuotaRangeUserViewMetricsRequest(TeaModel): def __init__( self, - extended_cidrs: List[str] = None, - security_group_id: str = None, - switch_id: str = None, - vpc_id: str = None, + end_time: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + start_time: str = None, + user_id: str = None, + workspace_id: str = None, ): - self.extended_cidrs = extended_cidrs - self.security_group_id = 
security_group_id - self.switch_id = switch_id - self.vpc_id = vpc_id + self.end_time = end_time + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + self.start_time = start_time + self.user_id = user_id + self.workspace_id = workspace_id def validate(self): pass @@ -10269,133 +10768,67 @@ def to_map(self): return _map result = dict() - if self.extended_cidrs is not None: - result['ExtendedCIDRs'] = self.extended_cidrs - if self.security_group_id is not None: - result['SecurityGroupId'] = self.security_group_id - if self.switch_id is not None: - result['SwitchId'] = self.switch_id - if self.vpc_id is not None: - result['VpcId'] = self.vpc_id + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.start_time is not None: + result['StartTime'] = self.start_time + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ExtendedCIDRs') is not None: - self.extended_cidrs = m.get('ExtendedCIDRs') - if m.get('SecurityGroupId') is not None: - self.security_group_id = m.get('SecurityGroupId') - if m.get('SwitchId') is not None: - self.switch_id = m.get('SwitchId') - if m.get('VpcId') is not None: - self.vpc_id = m.get('VpcId') + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = 
m.get('SortBy') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetTrainingJobResponseBody(TeaModel): +class GetQuotaRangeUserViewMetricsResponseBody(TeaModel): def __init__( self, - algorithm_id: str = None, - algorithm_name: str = None, - algorithm_provider: str = None, - algorithm_spec: AlgorithmSpec = None, - algorithm_version: str = None, - compute_resource: GetTrainingJobResponseBodyComputeResource = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - hyper_parameters: List[GetTrainingJobResponseBodyHyperParameters] = None, - input_channels: List[GetTrainingJobResponseBodyInputChannels] = None, - instances: List[GetTrainingJobResponseBodyInstances] = None, - is_temp_algo: bool = None, - labels: List[GetTrainingJobResponseBodyLabels] = None, - latest_metrics: List[GetTrainingJobResponseBodyLatestMetrics] = None, - latest_progress: GetTrainingJobResponseBodyLatestProgress = None, - output_channels: List[GetTrainingJobResponseBodyOutputChannels] = None, - reason_code: str = None, - reason_message: str = None, + quota_id: str = None, request_id: str = None, - role_arn: str = None, - scheduler: GetTrainingJobResponseBodyScheduler = None, - status: str = None, - status_transitions: List[GetTrainingJobResponseBodyStatusTransitions] = None, - training_job_description: str = None, - training_job_id: str = None, - training_job_name: str = None, - training_job_url: str = None, - user_id: str = None, - user_vpc: GetTrainingJobResponseBodyUserVpc = None, - workspace_id: str = None, + summary: QuotaUserViewMetric = None, + total_count: int = None, + user_metrics: List[QuotaUserViewMetric] = None, ): - self.algorithm_id = algorithm_id - self.algorithm_name = algorithm_name - self.algorithm_provider = algorithm_provider - self.algorithm_spec = algorithm_spec - 
self.algorithm_version = algorithm_version - self.compute_resource = compute_resource - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.hyper_parameters = hyper_parameters - self.input_channels = input_channels - self.instances = instances - self.is_temp_algo = is_temp_algo - self.labels = labels - self.latest_metrics = latest_metrics - self.latest_progress = latest_progress - self.output_channels = output_channels - self.reason_code = reason_code - self.reason_message = reason_message + self.quota_id = quota_id self.request_id = request_id - self.role_arn = role_arn - self.scheduler = scheduler - self.status = status - self.status_transitions = status_transitions - self.training_job_description = training_job_description - self.training_job_id = training_job_id - self.training_job_name = training_job_name - self.training_job_url = training_job_url - self.user_id = user_id - self.user_vpc = user_vpc - self.workspace_id = workspace_id + self.summary = summary + self.total_count = total_count + self.user_metrics = user_metrics def validate(self): - if self.algorithm_spec: - self.algorithm_spec.validate() - if self.compute_resource: - self.compute_resource.validate() - if self.hyper_parameters: - for k in self.hyper_parameters: - if k: - k.validate() - if self.input_channels: - for k in self.input_channels: - if k: - k.validate() - if self.instances: - for k in self.instances: - if k: - k.validate() - if self.labels: - for k in self.labels: - if k: - k.validate() - if self.latest_metrics: - for k in self.latest_metrics: - if k: - k.validate() - if self.latest_progress: - self.latest_progress.validate() - if self.output_channels: - for k in self.output_channels: - if k: - k.validate() - if self.scheduler: - self.scheduler.validate() - if self.status_transitions: - for k in self.status_transitions: + if self.summary: + self.summary.validate() + if self.user_metrics: + for k in self.user_metrics: if k: k.validate() - if 
self.user_vpc: - self.user_vpc.validate() def to_map(self): _map = super().to_map() @@ -10403,188 +10836,4459 @@ def to_map(self): return _map result = dict() - if self.algorithm_id is not None: - result['AlgorithmId'] = self.algorithm_id - if self.algorithm_name is not None: - result['AlgorithmName'] = self.algorithm_name - if self.algorithm_provider is not None: - result['AlgorithmProvider'] = self.algorithm_provider - if self.algorithm_spec is not None: - result['AlgorithmSpec'] = self.algorithm_spec.to_map() - if self.algorithm_version is not None: - result['AlgorithmVersion'] = self.algorithm_version - if self.compute_resource is not None: - result['ComputeResource'] = self.compute_resource.to_map() - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - result['HyperParameters'] = [] - if self.hyper_parameters is not None: - for k in self.hyper_parameters: - result['HyperParameters'].append(k.to_map() if k else None) - result['InputChannels'] = [] - if self.input_channels is not None: - for k in self.input_channels: - result['InputChannels'].append(k.to_map() if k else None) - result['Instances'] = [] - if self.instances is not None: - for k in self.instances: - result['Instances'].append(k.to_map() if k else None) - if self.is_temp_algo is not None: - result['IsTempAlgo'] = self.is_temp_algo - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - result['LatestMetrics'] = [] - if self.latest_metrics is not None: - for k in self.latest_metrics: - result['LatestMetrics'].append(k.to_map() if k else None) - if self.latest_progress is not None: - result['LatestProgress'] = self.latest_progress.to_map() - result['OutputChannels'] = [] - if self.output_channels is not None: - for k in self.output_channels: - result['OutputChannels'].append(k.to_map() if k else 
None) - if self.reason_code is not None: - result['ReasonCode'] = self.reason_code - if self.reason_message is not None: - result['ReasonMessage'] = self.reason_message + if self.quota_id is not None: + result['QuotaId'] = self.quota_id if self.request_id is not None: result['RequestId'] = self.request_id - if self.role_arn is not None: - result['RoleArn'] = self.role_arn - if self.scheduler is not None: - result['Scheduler'] = self.scheduler.to_map() - if self.status is not None: - result['Status'] = self.status - result['StatusTransitions'] = [] - if self.status_transitions is not None: - for k in self.status_transitions: - result['StatusTransitions'].append(k.to_map() if k else None) - if self.training_job_description is not None: - result['TrainingJobDescription'] = self.training_job_description - if self.training_job_id is not None: - result['TrainingJobId'] = self.training_job_id - if self.training_job_name is not None: - result['TrainingJobName'] = self.training_job_name - if self.training_job_url is not None: - result['TrainingJobUrl'] = self.training_job_url + if self.summary is not None: + result['Summary'] = self.summary.to_map() + if self.total_count is not None: + result['TotalCount'] = self.total_count + result['UserMetrics'] = [] + if self.user_metrics is not None: + for k in self.user_metrics: + result['UserMetrics'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Summary') is not None: + temp_model = QuotaUserViewMetric() + self.summary = temp_model.from_map(m['Summary']) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + self.user_metrics = [] + if m.get('UserMetrics') is not None: + for k in m.get('UserMetrics'): + temp_model = QuotaUserViewMetric() + self.user_metrics.append(temp_model.from_map(k)) + 
return self + + +class GetQuotaRangeUserViewMetricsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetQuotaRangeUserViewMetricsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetQuotaRangeUserViewMetricsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetQuotaTopoRequest(TeaModel): + def __init__( + self, + depth: int = None, + show_own_workloads: bool = None, + verbose: bool = None, + ): + self.depth = depth + self.show_own_workloads = show_own_workloads + self.verbose = verbose + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.depth is not None: + result['Depth'] = self.depth + if self.show_own_workloads is not None: + result['ShowOwnWorkloads'] = self.show_own_workloads + if self.verbose is not None: + result['Verbose'] = self.verbose + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Depth') is not None: + self.depth = m.get('Depth') + if m.get('ShowOwnWorkloads') is not None: + self.show_own_workloads = m.get('ShowOwnWorkloads') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') + return self + + +class 
GetQuotaTopoResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['requestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('requestId') is not None: + self.request_id = m.get('requestId') + return self + + +class GetQuotaTopoResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetQuotaTopoResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetQuotaTopoResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetQuotaUserViewMetricsRequest(TeaModel): + def __init__( + self, + order: str = None, + page_number: str = None, + page_size: str = None, + sort_by: str = None, + time_step: str = None, + user_id: str = None, + workspace_id: str = None, + ): + self.order = order + # This parameter is required. + self.page_number = page_number + # This parameter is required. 
+ self.page_size = page_size + self.sort_by = sort_by + self.time_step = time_step + self.user_id = user_id + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.time_step is not None: + result['TimeStep'] = self.time_step + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('TimeStep') is not None: + self.time_step = m.get('TimeStep') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class GetQuotaUserViewMetricsResponseBody(TeaModel): + def __init__( + self, + quota_id: str = None, + request_id: str = None, + summary: QuotaUserViewMetric = None, + total_count: int = None, + user_metrics: List[QuotaUserViewMetric] = None, + ): + self.quota_id = quota_id + self.request_id = request_id + self.summary = summary + self.total_count = total_count + self.user_metrics = user_metrics + + def validate(self): + if self.summary: + self.summary.validate() + if self.user_metrics: + for k in self.user_metrics: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + 
result = dict() + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.summary is not None: + result['Summary'] = self.summary.to_map() + if self.total_count is not None: + result['TotalCount'] = self.total_count + result['UserMetrics'] = [] + if self.user_metrics is not None: + for k in self.user_metrics: + result['UserMetrics'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('Summary') is not None: + temp_model = QuotaUserViewMetric() + self.summary = temp_model.from_map(m['Summary']) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + self.user_metrics = [] + if m.get('UserMetrics') is not None: + for k in m.get('UserMetrics'): + temp_model = QuotaUserViewMetric() + self.user_metrics.append(temp_model.from_map(k)) + return self + + +class GetQuotaUserViewMetricsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetQuotaUserViewMetricsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + 
temp_model = GetQuotaUserViewMetricsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetRangeUserViewMetricsRequest(TeaModel): + def __init__( + self, + end_time: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + start_time: str = None, + user_id: str = None, + workspace_id: str = None, + ): + self.end_time = end_time + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + self.start_time = start_time + self.user_id = user_id + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.start_time is not None: + result['StartTime'] = self.start_time + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class 
class GetRangeUserViewMetricsResponse(TeaModel):
    """HTTP-level wrapper (headers, status code, parsed body)."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetRangeUserViewMetricsResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('headers', self.headers),
            ('statusCode', self.status_code),
            ('body', self.body.to_map() if self.body is not None else None),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetRangeUserViewMetricsResponseBody().from_map(m['body'])
        return self


class GetResourceGroupRequestTag(TeaModel):
    """A single key/value tag filter for GetResourceGroup."""

    def __init__(
        self,
        key: str = None,
        value: str = None,
    ):
        self.key = key
        self.value = value

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (('Key', self.key), ('Value', self.value)):
            if value is not None:
                result[wire] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (('Key', 'key'), ('Value', 'value')):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self


class GetResourceGroupRequest(TeaModel):
    """Parameters for GetResourceGroup: workspace-data flag plus tag filters."""

    def __init__(
        self,
        is_aiworkspace_data_enabled: bool = None,
        tag: List[GetResourceGroupRequestTag] = None,
    ):
        self.is_aiworkspace_data_enabled = is_aiworkspace_data_enabled
        self.tag = tag

    def validate(self):
        for item in self.tag or []:
            if item:
                item.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.is_aiworkspace_data_enabled is not None:
            result['IsAIWorkspaceDataEnabled'] = self.is_aiworkspace_data_enabled
        # Tag is always present on the wire, even when empty.
        result['Tag'] = [item.to_map() if item else None for item in (self.tag or [])]
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('IsAIWorkspaceDataEnabled') is not None:
            self.is_aiworkspace_data_enabled = m.get('IsAIWorkspaceDataEnabled')
        self.tag = [
            GetResourceGroupRequestTag().from_map(item)
            for item in (m.get('Tag') or [])
        ]
        return self


class GetResourceGroupShrinkRequest(TeaModel):
    """Shrink form of GetResourceGroupRequest: tag list flattened to one string."""

    def __init__(
        self,
        is_aiworkspace_data_enabled: bool = None,
        tag_shrink: str = None,
    ):
        self.is_aiworkspace_data_enabled = is_aiworkspace_data_enabled
        self.tag_shrink = tag_shrink

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (
            ('IsAIWorkspaceDataEnabled', self.is_aiworkspace_data_enabled),
            ('Tag', self.tag_shrink),
        ):
            if value is not None:
                result[wire] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (
            ('IsAIWorkspaceDataEnabled', 'is_aiworkspace_data_enabled'),
            ('Tag', 'tag_shrink'),
        ):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self
class GetResourceGroupResponseBodyTags(TeaModel):
    """Tag (TagKey/TagValue) attached to a resource group in the response."""

    def __init__(
        self,
        tag_key: str = None,
        tag_value: str = None,
    ):
        self.tag_key = tag_key
        self.tag_value = tag_value

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (('TagKey', self.tag_key), ('TagValue', self.tag_value)):
            if value is not None:
                result[wire] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (('TagKey', 'tag_key'), ('TagValue', 'tag_value')):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self


class GetResourceGroupResponseBody(TeaModel):
    """Body of GetResourceGroup: resource-group metadata, tags, and VPC binding."""

    def __init__(
        self,
        cluster_id: str = None,
        computing_resource_provider: str = None,
        creator_id: str = None,
        description: str = None,
        gmt_created_time: str = None,
        gmt_modified_time: str = None,
        name: str = None,
        request_id: str = None,
        resource_type: str = None,
        status: str = None,
        support_rdma: bool = None,
        tags: List[GetResourceGroupResponseBodyTags] = None,
        user_vpc: UserVpc = None,
        workspace_id: str = None,
    ):
        self.cluster_id = cluster_id
        self.computing_resource_provider = computing_resource_provider
        self.creator_id = creator_id
        self.description = description
        self.gmt_created_time = gmt_created_time
        self.gmt_modified_time = gmt_modified_time
        self.name = name
        self.request_id = request_id
        self.resource_type = resource_type
        self.status = status
        self.support_rdma = support_rdma
        self.tags = tags
        self.user_vpc = user_vpc
        self.workspace_id = workspace_id

    def validate(self):
        for item in self.tags or []:
            if item:
                item.validate()
        if self.user_vpc:
            self.user_vpc.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (
            ('ClusterID', self.cluster_id),
            ('ComputingResourceProvider', self.computing_resource_provider),
            ('CreatorID', self.creator_id),
            ('Description', self.description),
            ('GmtCreatedTime', self.gmt_created_time),
            ('GmtModifiedTime', self.gmt_modified_time),
            ('Name', self.name),
            ('RequestId', self.request_id),
            ('ResourceType', self.resource_type),
            ('Status', self.status),
            ('SupportRDMA', self.support_rdma),
        ):
            if value is not None:
                result[wire] = value
        # Tags is always present on the wire, even when empty.
        result['Tags'] = [item.to_map() if item else None for item in (self.tags or [])]
        if self.user_vpc is not None:
            result['UserVpc'] = self.user_vpc.to_map()
        if self.workspace_id is not None:
            result['WorkspaceID'] = self.workspace_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (
            ('ClusterID', 'cluster_id'),
            ('ComputingResourceProvider', 'computing_resource_provider'),
            ('CreatorID', 'creator_id'),
            ('Description', 'description'),
            ('GmtCreatedTime', 'gmt_created_time'),
            ('GmtModifiedTime', 'gmt_modified_time'),
            ('Name', 'name'),
            ('RequestId', 'request_id'),
            ('ResourceType', 'resource_type'),
            ('Status', 'status'),
            ('SupportRDMA', 'support_rdma'),
            ('WorkspaceID', 'workspace_id'),
        ):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        self.tags = [
            GetResourceGroupResponseBodyTags().from_map(item)
            for item in (m.get('Tags') or [])
        ]
        if m.get('UserVpc') is not None:
            self.user_vpc = UserVpc().from_map(m['UserVpc'])
        return self


class GetResourceGroupResponse(TeaModel):
    """HTTP-level wrapper (headers, status code, parsed body)."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetResourceGroupResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('headers', self.headers),
            ('statusCode', self.status_code),
            ('body', self.body.to_map() if self.body is not None else None),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetResourceGroupResponseBody().from_map(m['body'])
        return self
class GetResourceGroupMachineGroupRequestTag(TeaModel):
    """A single key/value tag filter for GetResourceGroupMachineGroup."""

    def __init__(
        self,
        key: str = None,
        value: str = None,
    ):
        self.key = key
        self.value = value

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (('Key', self.key), ('Value', self.value)):
            if value is not None:
                result[wire] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (('Key', 'key'), ('Value', 'value')):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self


class GetResourceGroupMachineGroupRequest(TeaModel):
    """Parameters for GetResourceGroupMachineGroup: tag filters only."""

    def __init__(
        self,
        tag: List[GetResourceGroupMachineGroupRequestTag] = None,
    ):
        self.tag = tag

    def validate(self):
        for item in self.tag or []:
            if item:
                item.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        # Tag is always present on the wire, even when empty.
        result['Tag'] = [item.to_map() if item else None for item in (self.tag or [])]
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        self.tag = [
            GetResourceGroupMachineGroupRequestTag().from_map(item)
            for item in (m.get('Tag') or [])
        ]
        return self


class GetResourceGroupMachineGroupShrinkRequest(TeaModel):
    """Shrink form of the request: tag list flattened to one string."""

    def __init__(
        self,
        tag_shrink: str = None,
    ):
        self.tag_shrink = tag_shrink

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.tag_shrink is not None:
            result['Tag'] = self.tag_shrink
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('Tag') is not None:
            self.tag_shrink = m.get('Tag')
        return self


class GetResourceGroupMachineGroupResponseBodyTags(TeaModel):
    """Tag (TagKey/TagValue) attached to a machine group in the response."""

    def __init__(
        self,
        tag_key: str = None,
        tag_value: str = None,
    ):
        self.tag_key = tag_key
        self.tag_value = tag_value

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (('TagKey', self.tag_key), ('TagValue', self.tag_value)):
            if value is not None:
                result[wire] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (('TagKey', 'tag_key'), ('TagValue', 'tag_value')):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self


class GetResourceGroupMachineGroupResponseBody(TeaModel):
    """Body of GetResourceGroupMachineGroup: hardware spec, billing, drivers, tags."""

    def __init__(
        self,
        cpu: str = None,
        default_driver: str = None,
        ecs_count: int = None,
        ecs_spec: str = None,
        gmt_created_time: str = None,
        gmt_expired_time: str = None,
        gmt_modified_time: str = None,
        gmt_started_time: str = None,
        gpu: str = None,
        gpu_type: str = None,
        machine_group_id: str = None,
        memory: str = None,
        name: str = None,
        payment_duration: str = None,
        payment_duration_unit: str = None,
        payment_type: str = None,
        request_id: str = None,
        resource_group_id: str = None,
        status: str = None,
        supported_drivers: List[str] = None,
        tags: List[GetResourceGroupMachineGroupResponseBodyTags] = None,
    ):
        self.cpu = cpu
        self.default_driver = default_driver
        self.ecs_count = ecs_count
        self.ecs_spec = ecs_spec
        self.gmt_created_time = gmt_created_time
        self.gmt_expired_time = gmt_expired_time
        self.gmt_modified_time = gmt_modified_time
        self.gmt_started_time = gmt_started_time
        self.gpu = gpu
        self.gpu_type = gpu_type
        self.machine_group_id = machine_group_id
        self.memory = memory
        self.name = name
        self.payment_duration = payment_duration
        self.payment_duration_unit = payment_duration_unit
        self.payment_type = payment_type
        self.request_id = request_id
        self.resource_group_id = resource_group_id
        self.status = status
        # Plain list of strings; serialized as-is (no nested models).
        self.supported_drivers = supported_drivers
        self.tags = tags

    def validate(self):
        for item in self.tags or []:
            if item:
                item.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (
            ('Cpu', self.cpu),
            ('DefaultDriver', self.default_driver),
            ('EcsCount', self.ecs_count),
            ('EcsSpec', self.ecs_spec),
            ('GmtCreatedTime', self.gmt_created_time),
            ('GmtExpiredTime', self.gmt_expired_time),
            ('GmtModifiedTime', self.gmt_modified_time),
            ('GmtStartedTime', self.gmt_started_time),
            ('Gpu', self.gpu),
            ('GpuType', self.gpu_type),
            ('MachineGroupID', self.machine_group_id),
            ('Memory', self.memory),
            ('Name', self.name),
            ('PaymentDuration', self.payment_duration),
            ('PaymentDurationUnit', self.payment_duration_unit),
            ('PaymentType', self.payment_type),
            ('RequestId', self.request_id),
            ('ResourceGroupID', self.resource_group_id),
            ('Status', self.status),
            ('SupportedDrivers', self.supported_drivers),
        ):
            if value is not None:
                result[wire] = value
        # Tags is always present on the wire, even when empty.
        result['Tags'] = [item.to_map() if item else None for item in (self.tags or [])]
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (
            ('Cpu', 'cpu'),
            ('DefaultDriver', 'default_driver'),
            ('EcsCount', 'ecs_count'),
            ('EcsSpec', 'ecs_spec'),
            ('GmtCreatedTime', 'gmt_created_time'),
            ('GmtExpiredTime', 'gmt_expired_time'),
            ('GmtModifiedTime', 'gmt_modified_time'),
            ('GmtStartedTime', 'gmt_started_time'),
            ('Gpu', 'gpu'),
            ('GpuType', 'gpu_type'),
            ('MachineGroupID', 'machine_group_id'),
            ('Memory', 'memory'),
            ('Name', 'name'),
            ('PaymentDuration', 'payment_duration'),
            ('PaymentDurationUnit', 'payment_duration_unit'),
            ('PaymentType', 'payment_type'),
            ('RequestId', 'request_id'),
            ('ResourceGroupID', 'resource_group_id'),
            ('Status', 'status'),
            ('SupportedDrivers', 'supported_drivers'),
        ):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        self.tags = [
            GetResourceGroupMachineGroupResponseBodyTags().from_map(item)
            for item in (m.get('Tags') or [])
        ]
        return self


class GetResourceGroupMachineGroupResponse(TeaModel):
    """HTTP-level wrapper (headers, status code, parsed body)."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetResourceGroupMachineGroupResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('headers', self.headers),
            ('statusCode', self.status_code),
            ('body', self.body.to_map() if self.body is not None else None),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetResourceGroupMachineGroupResponseBody().from_map(m['body'])
        return self


class GetResourceGroupMetricsRequest(TeaModel):
    """Parameters for GetResourceGroupMetrics: time window, GPU type, step size."""

    def __init__(
        self,
        end_time: str = None,
        gputype: str = None,
        start_time: str = None,
        time_step: str = None,
    ):
        self.end_time = end_time
        # Serialized under 'GPUType' on the wire.
        self.gputype = gputype
        self.start_time = start_time
        self.time_step = time_step

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (
            ('EndTime', self.end_time),
            ('GPUType', self.gputype),
            ('StartTime', self.start_time),
            ('TimeStep', self.time_step),
        ):
            if value is not None:
                result[wire] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (
            ('EndTime', 'end_time'),
            ('GPUType', 'gputype'),
            ('StartTime', 'start_time'),
            ('TimeStep', 'time_step'),
        ):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self
class GetResourceGroupMetricsResponseBody(TeaModel):
    """Body of GetResourceGroupMetrics: metric series for one resource group."""

    def __init__(
        self,
        request_id: str = None,
        resource_group_id: str = None,
        resource_group_metrics: List[ResourceGroupMetric] = None,
    ):
        self.request_id = request_id
        self.resource_group_id = resource_group_id
        self.resource_group_metrics = resource_group_metrics

    def validate(self):
        for item in self.resource_group_metrics or []:
            if item:
                item.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.request_id is not None:
            result['RequestId'] = self.request_id
        if self.resource_group_id is not None:
            result['ResourceGroupID'] = self.resource_group_id
        # ResourceGroupMetrics is always present on the wire, even when empty.
        result['ResourceGroupMetrics'] = [
            item.to_map() if item else None
            for item in (self.resource_group_metrics or [])
        ]
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('RequestId') is not None:
            self.request_id = m.get('RequestId')
        if m.get('ResourceGroupID') is not None:
            self.resource_group_id = m.get('ResourceGroupID')
        self.resource_group_metrics = [
            ResourceGroupMetric().from_map(item)
            for item in (m.get('ResourceGroupMetrics') or [])
        ]
        return self


class GetResourceGroupMetricsResponse(TeaModel):
    """HTTP-level wrapper (headers, status code, parsed body)."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetResourceGroupMetricsResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('headers', self.headers),
            ('statusCode', self.status_code),
            ('body', self.body.to_map() if self.body is not None else None),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetResourceGroupMetricsResponseBody().from_map(m['body'])
        return self


class GetResourceGroupRequestRequest(TeaModel):
    """Parameters for GetResourceGroupRequest (requested-resources query)."""

    def __init__(
        self,
        pod_status: str = None,
        resource_group_id: str = None,
    ):
        self.pod_status = pod_status
        # This parameter is required.
        self.resource_group_id = resource_group_id

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (
            ('PodStatus', self.pod_status),
            ('ResourceGroupID', self.resource_group_id),
        ):
            if value is not None:
                result[wire] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (
            ('PodStatus', 'pod_status'),
            ('ResourceGroupID', 'resource_group_id'),
        ):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self


class GetResourceGroupRequestResponseBody(TeaModel):
    """Body of GetResourceGroupRequest: requested CPU/GPU/memory totals.

    NOTE(review): this body uses camelCase wire keys ('requestCPU', ...),
    unlike the PascalCase keys of sibling models — matches the wire format.
    """

    def __init__(
        self,
        request_cpu: int = None,
        request_gpu: int = None,
        request_gpuinfos: List[GPUInfo] = None,
        request_memory: int = None,
    ):
        self.request_cpu = request_cpu
        self.request_gpu = request_gpu
        self.request_gpuinfos = request_gpuinfos
        self.request_memory = request_memory

    def validate(self):
        for item in self.request_gpuinfos or []:
            if item:
                item.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.request_cpu is not None:
            result['requestCPU'] = self.request_cpu
        if self.request_gpu is not None:
            result['requestGPU'] = self.request_gpu
        # requestGPUInfos is always present on the wire, even when empty.
        result['requestGPUInfos'] = [
            item.to_map() if item else None
            for item in (self.request_gpuinfos or [])
        ]
        if self.request_memory is not None:
            result['requestMemory'] = self.request_memory
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('requestCPU') is not None:
            self.request_cpu = m.get('requestCPU')
        if m.get('requestGPU') is not None:
            self.request_gpu = m.get('requestGPU')
        self.request_gpuinfos = [
            GPUInfo().from_map(item)
            for item in (m.get('requestGPUInfos') or [])
        ]
        if m.get('requestMemory') is not None:
            self.request_memory = m.get('requestMemory')
        return self


class GetResourceGroupRequestResponse(TeaModel):
    """HTTP-level wrapper (headers, status code, parsed body)."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetResourceGroupRequestResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('headers', self.headers),
            ('statusCode', self.status_code),
            ('body', self.body.to_map() if self.body is not None else None),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetResourceGroupRequestResponseBody().from_map(m['body'])
        return self


class GetResourceGroupTotalRequest(TeaModel):
    """Parameters for GetResourceGroupTotal: target resource group id."""

    def __init__(
        self,
        resource_group_id: str = None,
    ):
        self.resource_group_id = resource_group_id

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.resource_group_id is not None:
            result['ResourceGroupID'] = self.resource_group_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('ResourceGroupID') is not None:
            self.resource_group_id = m.get('ResourceGroupID')
        return self
class GetResourceGroupTotalResponseBody(TeaModel):
    """Body of GetResourceGroupTotal: total CPU/GPU/memory capacity.

    NOTE(review): camelCase wire keys ('totalCPU', ...) mirror the wire format.
    """

    def __init__(
        self,
        total_cpu: int = None,
        total_gpu: int = None,
        total_gpuinfos: List[GPUInfo] = None,
        total_memory: int = None,
    ):
        self.total_cpu = total_cpu
        self.total_gpu = total_gpu
        self.total_gpuinfos = total_gpuinfos
        self.total_memory = total_memory

    def validate(self):
        for item in self.total_gpuinfos or []:
            if item:
                item.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.total_cpu is not None:
            result['totalCPU'] = self.total_cpu
        if self.total_gpu is not None:
            result['totalGPU'] = self.total_gpu
        # totalGPUInfos is always present on the wire, even when empty.
        result['totalGPUInfos'] = [
            item.to_map() if item else None
            for item in (self.total_gpuinfos or [])
        ]
        if self.total_memory is not None:
            result['totalMemory'] = self.total_memory
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('totalCPU') is not None:
            self.total_cpu = m.get('totalCPU')
        if m.get('totalGPU') is not None:
            self.total_gpu = m.get('totalGPU')
        self.total_gpuinfos = [
            GPUInfo().from_map(item)
            for item in (m.get('totalGPUInfos') or [])
        ]
        if m.get('totalMemory') is not None:
            self.total_memory = m.get('totalMemory')
        return self


class GetResourceGroupTotalResponse(TeaModel):
    """HTTP-level wrapper (headers, status code, parsed body)."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetResourceGroupTotalResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('headers', self.headers),
            ('statusCode', self.status_code),
            ('body', self.body.to_map() if self.body is not None else None),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetResourceGroupTotalResponseBody().from_map(m['body'])
        return self


class GetServiceIdentityRoleResponseBody(TeaModel):
    """Body of GetServiceIdentityRole: whether the role exists and its name."""

    def __init__(
        self,
        exist: bool = None,
        request_id: str = None,
        role_name: str = None,
    ):
        self.exist = exist
        self.request_id = request_id
        self.role_name = role_name

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (
            ('Exist', self.exist),
            ('RequestId', self.request_id),
            ('RoleName', self.role_name),
        ):
            if value is not None:
                result[wire] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (
            ('Exist', 'exist'),
            ('RequestId', 'request_id'),
            ('RoleName', 'role_name'),
        ):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self


class GetServiceIdentityRoleResponse(TeaModel):
    """HTTP-level wrapper (headers, status code, parsed body)."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetServiceIdentityRoleResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('headers', self.headers),
            ('statusCode', self.status_code),
            ('body', self.body.to_map() if self.body is not None else None),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetServiceIdentityRoleResponseBody().from_map(m['body'])
        return self
class GetSpotPriceHistoryRequest(TeaModel):
    """Parameters for GetSpotPriceHistory: time window plus paging/sorting."""

    def __init__(
        self,
        end_time: str = None,
        order: str = None,
        page_number: int = None,
        page_size: int = None,
        sort_by: str = None,
        start_time: str = None,
    ):
        self.end_time = end_time
        self.order = order
        self.page_number = page_number
        self.page_size = page_size
        self.sort_by = sort_by
        self.start_time = start_time

    def validate(self):
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for wire, value in (
            ('EndTime', self.end_time),
            ('Order', self.order),
            ('PageNumber', self.page_number),
            ('PageSize', self.page_size),
            ('SortBy', self.sort_by),
            ('StartTime', self.start_time),
        ):
            if value is not None:
                result[wire] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire, attr in (
            ('EndTime', 'end_time'),
            ('Order', 'order'),
            ('PageNumber', 'page_number'),
            ('PageSize', 'page_size'),
            ('SortBy', 'sort_by'),
            ('StartTime', 'start_time'),
        ):
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self


class GetSpotPriceHistoryResponseBody(TeaModel):
    """Body of GetSpotPriceHistory: a page of spot-price samples."""

    def __init__(
        self,
        request_id: str = None,
        spot_price_history: List[SpotPriceItem] = None,
        total_count: int = None,
    ):
        self.request_id = request_id
        self.spot_price_history = spot_price_history
        self.total_count = total_count

    def validate(self):
        for item in self.spot_price_history or []:
            if item:
                item.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        if self.request_id is not None:
            result['RequestId'] = self.request_id
        # SpotPriceHistory is always present on the wire, even when empty.
        result['SpotPriceHistory'] = [
            item.to_map() if item else None
            for item in (self.spot_price_history or [])
        ]
        if self.total_count is not None:
            result['TotalCount'] = self.total_count
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('RequestId') is not None:
            self.request_id = m.get('RequestId')
        self.spot_price_history = [
            SpotPriceItem().from_map(item)
            for item in (m.get('SpotPriceHistory') or [])
        ]
        if m.get('TotalCount') is not None:
            self.total_count = m.get('TotalCount')
        return self


class GetSpotPriceHistoryResponse(TeaModel):
    """HTTP-level wrapper (headers, status code, parsed body)."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: GetSpotPriceHistoryResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        if self.body:
            self.body.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('headers', self.headers),
            ('statusCode', self.status_code),
            ('body', self.body.to_map() if self.body is not None else None),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = GetSpotPriceHistoryResponseBody().from_map(m['body'])
        return self
to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetSpotPriceHistoryResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetSpotStockPreviewResponseBody(TeaModel): + def __init__( + self, + instance_type: str = None, + request_id: str = None, + stock_status: str = None, + ): + self.instance_type = instance_type + self.request_id = request_id + self.stock_status = stock_status + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.instance_type is not None: + result['InstanceType'] = self.instance_type + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.stock_status is not None: + result['StockStatus'] = self.stock_status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('InstanceType') is not None: + self.instance_type = m.get('InstanceType') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('StockStatus') is not None: + self.stock_status = m.get('StockStatus') + return self + + +class GetSpotStockPreviewResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetSpotStockPreviewResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = 
super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetSpotStockPreviewResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetTokenRequest(TeaModel): + def __init__( + self, + expire_time: int = None, + training_job_id: str = None, + ): + self.expire_time = expire_time + # This parameter is required. + self.training_job_id = training_job_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.expire_time is not None: + result['ExpireTime'] = self.expire_time + if self.training_job_id is not None: + result['TrainingJobId'] = self.training_job_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ExpireTime') is not None: + self.expire_time = m.get('ExpireTime') + if m.get('TrainingJobId') is not None: + self.training_job_id = m.get('TrainingJobId') + return self + + +class GetTokenResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + token: str = None, + ): + self.request_id = request_id + self.token = token + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.token is not None: + result['Token'] = self.token + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + 
self.request_id = m.get('RequestId') + if m.get('Token') is not None: + self.token = m.get('Token') + return self + + +class GetTokenResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetTokenResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetTokenResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetTrainingJobRequest(TeaModel): + def __init__( + self, + token: str = None, + ): + self.token = token + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.token is not None: + result['Token'] = self.token + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Token') is not None: + self.token = m.get('Token') + return self + + +class GetTrainingJobResponseBodyComputeResourceInstanceSpec(TeaModel): + def __init__( + self, + cpu: str = None, + gpu: str = None, + gputype: str = None, + memory: str = None, + shared_memory: str = None, + ): + self.cpu = cpu + self.gpu = gpu + self.gputype = gputype + self.memory = memory + self.shared_memory = shared_memory + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map 
+ + result = dict() + if self.cpu is not None: + result['CPU'] = self.cpu + if self.gpu is not None: + result['GPU'] = self.gpu + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.memory is not None: + result['Memory'] = self.memory + if self.shared_memory is not None: + result['SharedMemory'] = self.shared_memory + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CPU') is not None: + self.cpu = m.get('CPU') + if m.get('GPU') is not None: + self.gpu = m.get('GPU') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('Memory') is not None: + self.memory = m.get('Memory') + if m.get('SharedMemory') is not None: + self.shared_memory = m.get('SharedMemory') + return self + + +class GetTrainingJobResponseBodyComputeResourceSpotSpec(TeaModel): + def __init__( + self, + spot_discount_limit: float = None, + spot_strategy: str = None, + ): + self.spot_discount_limit = spot_discount_limit + self.spot_strategy = spot_strategy + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.spot_discount_limit is not None: + result['SpotDiscountLimit'] = self.spot_discount_limit + if self.spot_strategy is not None: + result['SpotStrategy'] = self.spot_strategy + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('SpotDiscountLimit') is not None: + self.spot_discount_limit = m.get('SpotDiscountLimit') + if m.get('SpotStrategy') is not None: + self.spot_strategy = m.get('SpotStrategy') + return self + + +class GetTrainingJobResponseBodyComputeResource(TeaModel): + def __init__( + self, + ecs_count: int = None, + ecs_spec: str = None, + instance_count: int = None, + instance_spec: GetTrainingJobResponseBodyComputeResourceInstanceSpec = None, + resource_id: str = None, + spot_spec: GetTrainingJobResponseBodyComputeResourceSpotSpec = None, + use_spot_instance: bool = None, + ): + 
self.ecs_count = ecs_count + self.ecs_spec = ecs_spec + self.instance_count = instance_count + self.instance_spec = instance_spec + self.resource_id = resource_id + self.spot_spec = spot_spec + self.use_spot_instance = use_spot_instance + + def validate(self): + if self.instance_spec: + self.instance_spec.validate() + if self.spot_spec: + self.spot_spec.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.ecs_count is not None: + result['EcsCount'] = self.ecs_count + if self.ecs_spec is not None: + result['EcsSpec'] = self.ecs_spec + if self.instance_count is not None: + result['InstanceCount'] = self.instance_count + if self.instance_spec is not None: + result['InstanceSpec'] = self.instance_spec.to_map() + if self.resource_id is not None: + result['ResourceId'] = self.resource_id + if self.spot_spec is not None: + result['SpotSpec'] = self.spot_spec.to_map() + if self.use_spot_instance is not None: + result['UseSpotInstance'] = self.use_spot_instance + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('EcsCount') is not None: + self.ecs_count = m.get('EcsCount') + if m.get('EcsSpec') is not None: + self.ecs_spec = m.get('EcsSpec') + if m.get('InstanceCount') is not None: + self.instance_count = m.get('InstanceCount') + if m.get('InstanceSpec') is not None: + temp_model = GetTrainingJobResponseBodyComputeResourceInstanceSpec() + self.instance_spec = temp_model.from_map(m['InstanceSpec']) + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + if m.get('SpotSpec') is not None: + temp_model = GetTrainingJobResponseBodyComputeResourceSpotSpec() + self.spot_spec = temp_model.from_map(m['SpotSpec']) + if m.get('UseSpotInstance') is not None: + self.use_spot_instance = m.get('UseSpotInstance') + return self + + +class GetTrainingJobResponseBodyExperimentConfig(TeaModel): + def __init__( + self, + experiment_id: str = None, + experiment_name: 
str = None, + ): + self.experiment_id = experiment_id + self.experiment_name = experiment_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id + if self.experiment_name is not None: + result['ExperimentName'] = self.experiment_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ExperimentId') is not None: + self.experiment_id = m.get('ExperimentId') + if m.get('ExperimentName') is not None: + self.experiment_name = m.get('ExperimentName') + return self + + +class GetTrainingJobResponseBodyHyperParameters(TeaModel): + def __init__( + self, + name: str = None, + value: str = None, + ): + self.name = name + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.name is not None: + result['Name'] = self.name + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class GetTrainingJobResponseBodyInputChannels(TeaModel): + def __init__( + self, + dataset_id: str = None, + input_uri: str = None, + name: str = None, + ): + self.dataset_id = dataset_id + self.input_uri = input_uri + self.name = name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.input_uri is not None: + result['InputUri'] = self.input_uri + if self.name is not None: + result['Name'] = self.name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DatasetId') is not None: + 
self.dataset_id = m.get('DatasetId') + if m.get('InputUri') is not None: + self.input_uri = m.get('InputUri') + if m.get('Name') is not None: + self.name = m.get('Name') + return self + + +class GetTrainingJobResponseBodyInstances(TeaModel): + def __init__( + self, + name: str = None, + role: str = None, + status: str = None, + ): + self.name = name + self.role = role + self.status = status + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.name is not None: + result['Name'] = self.name + if self.role is not None: + result['Role'] = self.role + if self.status is not None: + result['Status'] = self.status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Role') is not None: + self.role = m.get('Role') + if m.get('Status') is not None: + self.status = m.get('Status') + return self + + +class GetTrainingJobResponseBodyLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class GetTrainingJobResponseBodyLatestMetrics(TeaModel): + def __init__( + self, + name: str = None, + timestamp: str = None, + value: float = None, + ): + self.name = name + self.timestamp = timestamp + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.name is not None: + 
result['Name'] = self.name + if self.timestamp is not None: + result['Timestamp'] = self.timestamp + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Timestamp') is not None: + self.timestamp = m.get('Timestamp') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class GetTrainingJobResponseBodyLatestProgressOverallProgress(TeaModel): + def __init__( + self, + timestamp: str = None, + value: float = None, + ): + self.timestamp = timestamp + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.timestamp is not None: + result['Timestamp'] = self.timestamp + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Timestamp') is not None: + self.timestamp = m.get('Timestamp') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class GetTrainingJobResponseBodyLatestProgressRemainingTime(TeaModel): + def __init__( + self, + timestamp: str = None, + value: int = None, + ): + self.timestamp = timestamp + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.timestamp is not None: + result['Timestamp'] = self.timestamp + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Timestamp') is not None: + self.timestamp = m.get('Timestamp') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class GetTrainingJobResponseBodyLatestProgress(TeaModel): + def __init__( + self, + overall_progress: 
GetTrainingJobResponseBodyLatestProgressOverallProgress = None, + remaining_time: GetTrainingJobResponseBodyLatestProgressRemainingTime = None, + ): + self.overall_progress = overall_progress + self.remaining_time = remaining_time + + def validate(self): + if self.overall_progress: + self.overall_progress.validate() + if self.remaining_time: + self.remaining_time.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.overall_progress is not None: + result['OverallProgress'] = self.overall_progress.to_map() + if self.remaining_time is not None: + result['RemainingTime'] = self.remaining_time.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('OverallProgress') is not None: + temp_model = GetTrainingJobResponseBodyLatestProgressOverallProgress() + self.overall_progress = temp_model.from_map(m['OverallProgress']) + if m.get('RemainingTime') is not None: + temp_model = GetTrainingJobResponseBodyLatestProgressRemainingTime() + self.remaining_time = temp_model.from_map(m['RemainingTime']) + return self + + +class GetTrainingJobResponseBodyOutputChannels(TeaModel): + def __init__( + self, + dataset_id: str = None, + name: str = None, + output_uri: str = None, + ): + self.dataset_id = dataset_id + self.name = name + self.output_uri = output_uri + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.dataset_id is not None: + result['DatasetId'] = self.dataset_id + if self.name is not None: + result['Name'] = self.name + if self.output_uri is not None: + result['OutputUri'] = self.output_uri + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('DatasetId') is not None: + self.dataset_id = m.get('DatasetId') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('OutputUri') is not None: + self.output_uri = m.get('OutputUri') + 
return self + + +class GetTrainingJobResponseBodyOutputModel(TeaModel): + def __init__( + self, + output_channel_name: str = None, + uri: str = None, + ): + self.output_channel_name = output_channel_name + self.uri = uri + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.output_channel_name is not None: + result['OutputChannelName'] = self.output_channel_name + if self.uri is not None: + result['Uri'] = self.uri + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('OutputChannelName') is not None: + self.output_channel_name = m.get('OutputChannelName') + if m.get('Uri') is not None: + self.uri = m.get('Uri') + return self + + +class GetTrainingJobResponseBodyScheduler(TeaModel): + def __init__( + self, + max_running_time_in_seconds: int = None, + ): + self.max_running_time_in_seconds = max_running_time_in_seconds + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.max_running_time_in_seconds is not None: + result['MaxRunningTimeInSeconds'] = self.max_running_time_in_seconds + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('MaxRunningTimeInSeconds') is not None: + self.max_running_time_in_seconds = m.get('MaxRunningTimeInSeconds') + return self + + +class GetTrainingJobResponseBodySettings(TeaModel): + def __init__( + self, + aimaster_type: str = None, + enable_error_monitoring_in_aimaster: bool = None, + error_monitoring_args: str = None, + priority: int = None, + ): + self.aimaster_type = aimaster_type + self.enable_error_monitoring_in_aimaster = enable_error_monitoring_in_aimaster + self.error_monitoring_args = error_monitoring_args + self.priority = priority + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if 
self.aimaster_type is not None: + result['AIMasterType'] = self.aimaster_type + if self.enable_error_monitoring_in_aimaster is not None: + result['EnableErrorMonitoringInAIMaster'] = self.enable_error_monitoring_in_aimaster + if self.error_monitoring_args is not None: + result['ErrorMonitoringArgs'] = self.error_monitoring_args + if self.priority is not None: + result['Priority'] = self.priority + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AIMasterType') is not None: + self.aimaster_type = m.get('AIMasterType') + if m.get('EnableErrorMonitoringInAIMaster') is not None: + self.enable_error_monitoring_in_aimaster = m.get('EnableErrorMonitoringInAIMaster') + if m.get('ErrorMonitoringArgs') is not None: + self.error_monitoring_args = m.get('ErrorMonitoringArgs') + if m.get('Priority') is not None: + self.priority = m.get('Priority') + return self + + +class GetTrainingJobResponseBodyStatusTransitions(TeaModel): + def __init__( + self, + end_time: str = None, + reason_code: str = None, + reason_message: str = None, + start_time: str = None, + status: str = None, + ): + self.end_time = end_time + self.reason_code = reason_code + self.reason_message = reason_message + self.start_time = start_time + self.status = status + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.start_time is not None: + result['StartTime'] = self.start_time + if self.status is not None: + result['Status'] = self.status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') 
+ if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') + if m.get('Status') is not None: + self.status = m.get('Status') + return self + + +class GetTrainingJobResponseBodyUserVpc(TeaModel): + def __init__( + self, + extended_cidrs: List[str] = None, + security_group_id: str = None, + switch_id: str = None, + vpc_id: str = None, + ): + self.extended_cidrs = extended_cidrs + self.security_group_id = security_group_id + self.switch_id = switch_id + self.vpc_id = vpc_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.extended_cidrs is not None: + result['ExtendedCIDRs'] = self.extended_cidrs + if self.security_group_id is not None: + result['SecurityGroupId'] = self.security_group_id + if self.switch_id is not None: + result['SwitchId'] = self.switch_id + if self.vpc_id is not None: + result['VpcId'] = self.vpc_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ExtendedCIDRs') is not None: + self.extended_cidrs = m.get('ExtendedCIDRs') + if m.get('SecurityGroupId') is not None: + self.security_group_id = m.get('SecurityGroupId') + if m.get('SwitchId') is not None: + self.switch_id = m.get('SwitchId') + if m.get('VpcId') is not None: + self.vpc_id = m.get('VpcId') + return self + + +class GetTrainingJobResponseBody(TeaModel): + def __init__( + self, + algorithm_id: str = None, + algorithm_name: str = None, + algorithm_provider: str = None, + algorithm_spec: AlgorithmSpec = None, + algorithm_version: str = None, + compute_resource: GetTrainingJobResponseBodyComputeResource = None, + duration: int = None, + environments: Dict[str, str] = None, + experiment_config: GetTrainingJobResponseBodyExperimentConfig = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + hyper_parameters: 
List[GetTrainingJobResponseBodyHyperParameters] = None, + input_channels: List[GetTrainingJobResponseBodyInputChannels] = None, + instances: List[GetTrainingJobResponseBodyInstances] = None, + is_temp_algo: bool = None, + labels: List[GetTrainingJobResponseBodyLabels] = None, + latest_metrics: List[GetTrainingJobResponseBodyLatestMetrics] = None, + latest_progress: GetTrainingJobResponseBodyLatestProgress = None, + output_channels: List[GetTrainingJobResponseBodyOutputChannels] = None, + output_model: GetTrainingJobResponseBodyOutputModel = None, + python_requirements: List[str] = None, + reason_code: str = None, + reason_message: str = None, + request_id: str = None, + role_arn: str = None, + scheduler: GetTrainingJobResponseBodyScheduler = None, + settings: GetTrainingJobResponseBodySettings = None, + status: str = None, + status_transitions: List[GetTrainingJobResponseBodyStatusTransitions] = None, + training_job_description: str = None, + training_job_id: str = None, + training_job_name: str = None, + training_job_url: str = None, + user_id: str = None, + user_vpc: GetTrainingJobResponseBodyUserVpc = None, + workspace_id: str = None, + ): + self.algorithm_id = algorithm_id + self.algorithm_name = algorithm_name + self.algorithm_provider = algorithm_provider + self.algorithm_spec = algorithm_spec + self.algorithm_version = algorithm_version + self.compute_resource = compute_resource + self.duration = duration + self.environments = environments + self.experiment_config = experiment_config + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.hyper_parameters = hyper_parameters + self.input_channels = input_channels + self.instances = instances + self.is_temp_algo = is_temp_algo + self.labels = labels + self.latest_metrics = latest_metrics + self.latest_progress = latest_progress + self.output_channels = output_channels + self.output_model = output_model + self.python_requirements = python_requirements + self.reason_code = 
reason_code + self.reason_message = reason_message + self.request_id = request_id + self.role_arn = role_arn + self.scheduler = scheduler + self.settings = settings + self.status = status + self.status_transitions = status_transitions + self.training_job_description = training_job_description + self.training_job_id = training_job_id + self.training_job_name = training_job_name + self.training_job_url = training_job_url + self.user_id = user_id + self.user_vpc = user_vpc + self.workspace_id = workspace_id + + def validate(self): + if self.algorithm_spec: + self.algorithm_spec.validate() + if self.compute_resource: + self.compute_resource.validate() + if self.experiment_config: + self.experiment_config.validate() + if self.hyper_parameters: + for k in self.hyper_parameters: + if k: + k.validate() + if self.input_channels: + for k in self.input_channels: + if k: + k.validate() + if self.instances: + for k in self.instances: + if k: + k.validate() + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.latest_metrics: + for k in self.latest_metrics: + if k: + k.validate() + if self.latest_progress: + self.latest_progress.validate() + if self.output_channels: + for k in self.output_channels: + if k: + k.validate() + if self.output_model: + self.output_model.validate() + if self.scheduler: + self.scheduler.validate() + if self.settings: + self.settings.validate() + if self.status_transitions: + for k in self.status_transitions: + if k: + k.validate() + if self.user_vpc: + self.user_vpc.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.algorithm_id is not None: + result['AlgorithmId'] = self.algorithm_id + if self.algorithm_name is not None: + result['AlgorithmName'] = self.algorithm_name + if self.algorithm_provider is not None: + result['AlgorithmProvider'] = self.algorithm_provider + if self.algorithm_spec is not None: + result['AlgorithmSpec'] = self.algorithm_spec.to_map() 
+ if self.algorithm_version is not None: + result['AlgorithmVersion'] = self.algorithm_version + if self.compute_resource is not None: + result['ComputeResource'] = self.compute_resource.to_map() + if self.duration is not None: + result['Duration'] = self.duration + if self.environments is not None: + result['Environments'] = self.environments + if self.experiment_config is not None: + result['ExperimentConfig'] = self.experiment_config.to_map() + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['HyperParameters'] = [] + if self.hyper_parameters is not None: + for k in self.hyper_parameters: + result['HyperParameters'].append(k.to_map() if k else None) + result['InputChannels'] = [] + if self.input_channels is not None: + for k in self.input_channels: + result['InputChannels'].append(k.to_map() if k else None) + result['Instances'] = [] + if self.instances is not None: + for k in self.instances: + result['Instances'].append(k.to_map() if k else None) + if self.is_temp_algo is not None: + result['IsTempAlgo'] = self.is_temp_algo + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + result['LatestMetrics'] = [] + if self.latest_metrics is not None: + for k in self.latest_metrics: + result['LatestMetrics'].append(k.to_map() if k else None) + if self.latest_progress is not None: + result['LatestProgress'] = self.latest_progress.to_map() + result['OutputChannels'] = [] + if self.output_channels is not None: + for k in self.output_channels: + result['OutputChannels'].append(k.to_map() if k else None) + if self.output_model is not None: + result['OutputModel'] = self.output_model.to_map() + if self.python_requirements is not None: + result['PythonRequirements'] = self.python_requirements + if self.reason_code is not None: + result['ReasonCode'] = 
self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.role_arn is not None: + result['RoleArn'] = self.role_arn + if self.scheduler is not None: + result['Scheduler'] = self.scheduler.to_map() + if self.settings is not None: + result['Settings'] = self.settings.to_map() + if self.status is not None: + result['Status'] = self.status + result['StatusTransitions'] = [] + if self.status_transitions is not None: + for k in self.status_transitions: + result['StatusTransitions'].append(k.to_map() if k else None) + if self.training_job_description is not None: + result['TrainingJobDescription'] = self.training_job_description + if self.training_job_id is not None: + result['TrainingJobId'] = self.training_job_id + if self.training_job_name is not None: + result['TrainingJobName'] = self.training_job_name + if self.training_job_url is not None: + result['TrainingJobUrl'] = self.training_job_url + if self.user_id is not None: + result['UserId'] = self.user_id + if self.user_vpc is not None: + result['UserVpc'] = self.user_vpc.to_map() + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AlgorithmId') is not None: + self.algorithm_id = m.get('AlgorithmId') + if m.get('AlgorithmName') is not None: + self.algorithm_name = m.get('AlgorithmName') + if m.get('AlgorithmProvider') is not None: + self.algorithm_provider = m.get('AlgorithmProvider') + if m.get('AlgorithmSpec') is not None: + temp_model = AlgorithmSpec() + self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec']) + if m.get('AlgorithmVersion') is not None: + self.algorithm_version = m.get('AlgorithmVersion') + if m.get('ComputeResource') is not None: + temp_model = GetTrainingJobResponseBodyComputeResource() + self.compute_resource = 
temp_model.from_map(m['ComputeResource']) + if m.get('Duration') is not None: + self.duration = m.get('Duration') + if m.get('Environments') is not None: + self.environments = m.get('Environments') + if m.get('ExperimentConfig') is not None: + temp_model = GetTrainingJobResponseBodyExperimentConfig() + self.experiment_config = temp_model.from_map(m['ExperimentConfig']) + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.hyper_parameters = [] + if m.get('HyperParameters') is not None: + for k in m.get('HyperParameters'): + temp_model = GetTrainingJobResponseBodyHyperParameters() + self.hyper_parameters.append(temp_model.from_map(k)) + self.input_channels = [] + if m.get('InputChannels') is not None: + for k in m.get('InputChannels'): + temp_model = GetTrainingJobResponseBodyInputChannels() + self.input_channels.append(temp_model.from_map(k)) + self.instances = [] + if m.get('Instances') is not None: + for k in m.get('Instances'): + temp_model = GetTrainingJobResponseBodyInstances() + self.instances.append(temp_model.from_map(k)) + if m.get('IsTempAlgo') is not None: + self.is_temp_algo = m.get('IsTempAlgo') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = GetTrainingJobResponseBodyLabels() + self.labels.append(temp_model.from_map(k)) + self.latest_metrics = [] + if m.get('LatestMetrics') is not None: + for k in m.get('LatestMetrics'): + temp_model = GetTrainingJobResponseBodyLatestMetrics() + self.latest_metrics.append(temp_model.from_map(k)) + if m.get('LatestProgress') is not None: + temp_model = GetTrainingJobResponseBodyLatestProgress() + self.latest_progress = temp_model.from_map(m['LatestProgress']) + self.output_channels = [] + if m.get('OutputChannels') is not None: + for k in m.get('OutputChannels'): + temp_model = GetTrainingJobResponseBodyOutputChannels() + 
self.output_channels.append(temp_model.from_map(k)) + if m.get('OutputModel') is not None: + temp_model = GetTrainingJobResponseBodyOutputModel() + self.output_model = temp_model.from_map(m['OutputModel']) + if m.get('PythonRequirements') is not None: + self.python_requirements = m.get('PythonRequirements') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('RoleArn') is not None: + self.role_arn = m.get('RoleArn') + if m.get('Scheduler') is not None: + temp_model = GetTrainingJobResponseBodyScheduler() + self.scheduler = temp_model.from_map(m['Scheduler']) + if m.get('Settings') is not None: + temp_model = GetTrainingJobResponseBodySettings() + self.settings = temp_model.from_map(m['Settings']) + if m.get('Status') is not None: + self.status = m.get('Status') + self.status_transitions = [] + if m.get('StatusTransitions') is not None: + for k in m.get('StatusTransitions'): + temp_model = GetTrainingJobResponseBodyStatusTransitions() + self.status_transitions.append(temp_model.from_map(k)) + if m.get('TrainingJobDescription') is not None: + self.training_job_description = m.get('TrainingJobDescription') + if m.get('TrainingJobId') is not None: + self.training_job_id = m.get('TrainingJobId') + if m.get('TrainingJobName') is not None: + self.training_job_name = m.get('TrainingJobName') + if m.get('TrainingJobUrl') is not None: + self.training_job_url = m.get('TrainingJobUrl') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('UserVpc') is not None: + temp_model = GetTrainingJobResponseBodyUserVpc() + self.user_vpc = temp_model.from_map(m['UserVpc']) + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class GetTrainingJobResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = 
None, + status_code: int = None, + body: GetTrainingJobResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetTrainingJobResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetTrainingJobErrorInfoRequest(TeaModel): + def __init__( + self, + token: str = None, + ): + self.token = token + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.token is not None: + result['Token'] = self.token + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Token') is not None: + self.token = m.get('Token') + return self + + +class GetTrainingJobErrorInfoResponseBodyErrorInfo(TeaModel): + def __init__( + self, + additional_info: str = None, + code: str = None, + message: str = None, + ): + self.additional_info = additional_info + self.code = code + self.message = message + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.additional_info is not None: + result['AdditionalInfo'] = self.additional_info + if self.code is not None: + result['Code'] = self.code + if self.message is not None: + result['Message'] = self.message + return result + + def 
from_map(self, m: dict = None): + m = m or dict() + if m.get('AdditionalInfo') is not None: + self.additional_info = m.get('AdditionalInfo') + if m.get('Code') is not None: + self.code = m.get('Code') + if m.get('Message') is not None: + self.message = m.get('Message') + return self + + +class GetTrainingJobErrorInfoResponseBody(TeaModel): + def __init__( + self, + error_info: GetTrainingJobErrorInfoResponseBodyErrorInfo = None, + request_id: str = None, + ): + self.error_info = error_info + self.request_id = request_id + + def validate(self): + if self.error_info: + self.error_info.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.error_info is not None: + result['ErrorInfo'] = self.error_info.to_map() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ErrorInfo') is not None: + temp_model = GetTrainingJobErrorInfoResponseBodyErrorInfo() + self.error_info = temp_model.from_map(m['ErrorInfo']) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class GetTrainingJobErrorInfoResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetTrainingJobErrorInfoResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') 
is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetTrainingJobErrorInfoResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetTrainingJobLatestMetricsRequest(TeaModel): + def __init__( + self, + names: str = None, + token: str = None, + ): + self.names = names + self.token = token + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.names is not None: + result['Names'] = self.names + if self.token is not None: + result['Token'] = self.token + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Names') is not None: + self.names = m.get('Names') + if m.get('Token') is not None: + self.token = m.get('Token') + return self + + +class GetTrainingJobLatestMetricsResponseBodyMetrics(TeaModel): + def __init__( + self, + name: str = None, + timestamp: str = None, + value: float = None, + ): + self.name = name + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.timestamp = timestamp + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.name is not None: + result['Name'] = self.name + if self.timestamp is not None: + result['Timestamp'] = self.timestamp + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Timestamp') is not None: + self.timestamp = m.get('Timestamp') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class GetTrainingJobLatestMetricsResponseBody(TeaModel): + def __init__( + self, + metrics: List[GetTrainingJobLatestMetricsResponseBodyMetrics] = None, + request_id: str = None, + ): + self.metrics = metrics + self.request_id = request_id + + def validate(self): 
+ if self.metrics: + for k in self.metrics: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Metrics'] = [] + if self.metrics is not None: + for k in self.metrics: + result['Metrics'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.metrics = [] + if m.get('Metrics') is not None: + for k in m.get('Metrics'): + temp_model = GetTrainingJobLatestMetricsResponseBodyMetrics() + self.metrics.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class GetTrainingJobLatestMetricsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetTrainingJobLatestMetricsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetTrainingJobLatestMetricsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class GetUserViewMetricsRequest(TeaModel): + def __init__( + self, + order: str = None, + page_number: str = None, + page_size: str = None, + sort_by: str = None, + time_step: str = None, + user_id: str = None, + workspace_id: 
str = None, + ): + self.order = order + # This parameter is required. + self.page_number = page_number + # This parameter is required. + self.page_size = page_size + self.sort_by = sort_by + self.time_step = time_step + self.user_id = user_id + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.time_step is not None: + result['TimeStep'] = self.time_step + if self.user_id is not None: + result['UserId'] = self.user_id + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('TimeStep') is not None: + self.time_step = m.get('TimeStep') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class GetUserViewMetricsResponseBody(TeaModel): + def __init__( + self, + resource_group_id: str = None, + summary: UserViewMetric = None, + total: int = None, + user_metrics: List[UserViewMetric] = None, + ): + self.resource_group_id = resource_group_id + self.summary = summary + self.total = total + self.user_metrics = user_metrics + + def validate(self): + if self.summary: + self.summary.validate() + if self.user_metrics: + for k in self.user_metrics: + if k: + k.validate() + + def 
to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.resource_group_id is not None: + result['ResourceGroupId'] = self.resource_group_id + if self.summary is not None: + result['Summary'] = self.summary.to_map() + if self.total is not None: + result['Total'] = self.total + result['UserMetrics'] = [] + if self.user_metrics is not None: + for k in self.user_metrics: + result['UserMetrics'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ResourceGroupId') is not None: + self.resource_group_id = m.get('ResourceGroupId') + if m.get('Summary') is not None: + temp_model = UserViewMetric() + self.summary = temp_model.from_map(m['Summary']) + if m.get('Total') is not None: + self.total = m.get('Total') + self.user_metrics = [] + if m.get('UserMetrics') is not None: + for k in m.get('UserMetrics'): + temp_model = UserViewMetric() + self.user_metrics.append(temp_model.from_map(k)) + return self + + +class GetUserViewMetricsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: GetUserViewMetricsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = GetUserViewMetricsResponseBody() + self.body = 
temp_model.from_map(m['body']) + return self + + +class ListAI4DSerivcesRequest(TeaModel): + def __init__( + self, + service_type: str = None, + workspace_id: str = None, + ): + self.service_type = service_type + # This parameter is required. + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.service_type is not None: + result['ServiceType'] = self.service_type + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ServiceType') is not None: + self.service_type = m.get('ServiceType') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class ListAI4DSerivcesResponseBodyServices(TeaModel): + def __init__( + self, + service_name: str = None, + service_type: str = None, + ): + self.service_name = service_name + self.service_type = service_type + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.service_name is not None: + result['ServiceName'] = self.service_name + if self.service_type is not None: + result['ServiceType'] = self.service_type + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ServiceName') is not None: + self.service_name = m.get('ServiceName') + if m.get('ServiceType') is not None: + self.service_type = m.get('ServiceType') + return self + + +class ListAI4DSerivcesResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + services: List[ListAI4DSerivcesResponseBodyServices] = None, + ): + self.request_id = request_id + self.services = services + + def validate(self): + if self.services: + for k in self.services: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + 
result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + result['Services'] = [] + if self.services is not None: + for k in self.services: + result['Services'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + self.services = [] + if m.get('Services') is not None: + for k in m.get('Services'): + temp_model = ListAI4DSerivcesResponseBodyServices() + self.services.append(temp_model.from_map(k)) + return self + + +class ListAI4DSerivcesResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListAI4DSerivcesResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListAI4DSerivcesResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListAI4DServiceTemplatesRequest(TeaModel): + def __init__( + self, + service_type: str = None, + workspace_id: str = None, + ): + self.service_type = service_type + # This parameter is required. 
+ self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.service_type is not None: + result['ServiceType'] = self.service_type + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ServiceType') is not None: + self.service_type = m.get('ServiceType') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class ListAI4DServiceTemplatesResponseBodyServiceTemplatesLabels(TeaModel): + def __init__( + self, + key: str = None, + value: str = None, + ): + self.key = key + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class ListAI4DServiceTemplatesResponseBodyServiceTemplates(TeaModel): + def __init__( + self, + inference_spec: Dict[str, Any] = None, + labels: List[ListAI4DServiceTemplatesResponseBodyServiceTemplatesLabels] = None, + service_template_description: str = None, + service_template_doc: str = None, + service_template_id: str = None, + service_template_name: str = None, + ): + self.inference_spec = inference_spec + self.labels = labels + self.service_template_description = service_template_description + self.service_template_doc = service_template_doc + self.service_template_id = service_template_id + self.service_template_name = service_template_name + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def 
to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.service_template_description is not None: + result['ServiceTemplateDescription'] = self.service_template_description + if self.service_template_doc is not None: + result['ServiceTemplateDoc'] = self.service_template_doc + if self.service_template_id is not None: + result['ServiceTemplateId'] = self.service_template_id + if self.service_template_name is not None: + result['ServiceTemplateName'] = self.service_template_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('InferenceSpec') is not None: + self.inference_spec = m.get('InferenceSpec') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = ListAI4DServiceTemplatesResponseBodyServiceTemplatesLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('ServiceTemplateDescription') is not None: + self.service_template_description = m.get('ServiceTemplateDescription') + if m.get('ServiceTemplateDoc') is not None: + self.service_template_doc = m.get('ServiceTemplateDoc') + if m.get('ServiceTemplateId') is not None: + self.service_template_id = m.get('ServiceTemplateId') + if m.get('ServiceTemplateName') is not None: + self.service_template_name = m.get('ServiceTemplateName') + return self + + +class ListAI4DServiceTemplatesResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + service_templates: List[ListAI4DServiceTemplatesResponseBodyServiceTemplates] = None, + ): + self.request_id = request_id + self.service_templates = service_templates + + def validate(self): + if self.service_templates: + for k in self.service_templates: + if k: + k.validate() + + def to_map(self): + _map = 
super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + result['ServiceTemplates'] = [] + if self.service_templates is not None: + for k in self.service_templates: + result['ServiceTemplates'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + self.service_templates = [] + if m.get('ServiceTemplates') is not None: + for k in m.get('ServiceTemplates'): + temp_model = ListAI4DServiceTemplatesResponseBodyServiceTemplates() + self.service_templates.append(temp_model.from_map(k)) + return self + + +class ListAI4DServiceTemplatesResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListAI4DServiceTemplatesResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListAI4DServiceTemplatesResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListAlgorithmVersionsRequest(TeaModel): + def __init__( + self, + page_number: int = None, + page_size: int = None, + ): + self.page_number = page_number + self.page_size = page_size + + def validate(self): + pass + + def to_map(self): + _map = 
super().to_map() + if _map is not None: + return _map + + result = dict() + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + return self + + +class ListAlgorithmVersionsResponseBodyAlgorithmVersions(TeaModel): + def __init__( + self, + algorithm_id: str = None, + algorithm_name: str = None, + algorithm_provider: str = None, + algorithm_version: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + tenant_id: str = None, + user_id: str = None, + ): + self.algorithm_id = algorithm_id + self.algorithm_name = algorithm_name + self.algorithm_provider = algorithm_provider + self.algorithm_version = algorithm_version + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.tenant_id = tenant_id + self.user_id = user_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.algorithm_id is not None: + result['AlgorithmId'] = self.algorithm_id + if self.algorithm_name is not None: + result['AlgorithmName'] = self.algorithm_name + if self.algorithm_provider is not None: + result['AlgorithmProvider'] = self.algorithm_provider + if self.algorithm_version is not None: + result['AlgorithmVersion'] = self.algorithm_version + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + if self.tenant_id is not None: + result['TenantId'] = self.tenant_id + if self.user_id is not None: + result['UserId'] = self.user_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if 
m.get('AlgorithmId') is not None: + self.algorithm_id = m.get('AlgorithmId') + if m.get('AlgorithmName') is not None: + self.algorithm_name = m.get('AlgorithmName') + if m.get('AlgorithmProvider') is not None: + self.algorithm_provider = m.get('AlgorithmProvider') + if m.get('AlgorithmVersion') is not None: + self.algorithm_version = m.get('AlgorithmVersion') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('TenantId') is not None: + self.tenant_id = m.get('TenantId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + return self + + +class ListAlgorithmVersionsResponseBody(TeaModel): + def __init__( + self, + algorithm_versions: List[ListAlgorithmVersionsResponseBodyAlgorithmVersions] = None, + request_id: str = None, + total_count: int = None, + ): + self.algorithm_versions = algorithm_versions + self.request_id = request_id + self.total_count = total_count + + def validate(self): + if self.algorithm_versions: + for k in self.algorithm_versions: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['AlgorithmVersions'] = [] + if self.algorithm_versions is not None: + for k in self.algorithm_versions: + result['AlgorithmVersions'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.algorithm_versions = [] + if m.get('AlgorithmVersions') is not None: + for k in m.get('AlgorithmVersions'): + temp_model = ListAlgorithmVersionsResponseBodyAlgorithmVersions() + self.algorithm_versions.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('TotalCount') is 
not None: + self.total_count = m.get('TotalCount') + return self + + +class ListAlgorithmVersionsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListAlgorithmVersionsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListAlgorithmVersionsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListAlgorithmsRequest(TeaModel): + def __init__( + self, + algorithm_id: str = None, + algorithm_name: str = None, + algorithm_provider: str = None, + page_number: int = None, + page_size: int = None, + workspace_id: str = None, + ): + self.algorithm_id = algorithm_id + self.algorithm_name = algorithm_name + self.algorithm_provider = algorithm_provider + self.page_number = page_number + self.page_size = page_size + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.algorithm_id is not None: + result['AlgorithmId'] = self.algorithm_id + if self.algorithm_name is not None: + result['AlgorithmName'] = self.algorithm_name + if self.algorithm_provider is not None: + result['AlgorithmProvider'] = self.algorithm_provider + if self.page_number is not None: + result['PageNumber'] = 
self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AlgorithmId') is not None: + self.algorithm_id = m.get('AlgorithmId') + if m.get('AlgorithmName') is not None: + self.algorithm_name = m.get('AlgorithmName') + if m.get('AlgorithmProvider') is not None: + self.algorithm_provider = m.get('AlgorithmProvider') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class ListAlgorithmsResponseBodyAlgorithms(TeaModel): + def __init__( + self, + algorithm_description: str = None, + algorithm_id: str = None, + algorithm_name: str = None, + algorithm_provider: str = None, + display_name: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + user_id: str = None, + workspace_id: str = None, + ): + self.algorithm_description = algorithm_description + self.algorithm_id = algorithm_id + self.algorithm_name = algorithm_name + self.algorithm_provider = algorithm_provider + self.display_name = display_name + self.gmt_create_time = gmt_create_time + self.gmt_modified_time = gmt_modified_time + self.user_id = user_id + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.algorithm_description is not None: + result['AlgorithmDescription'] = self.algorithm_description + if self.algorithm_id is not None: + result['AlgorithmId'] = self.algorithm_id + if self.algorithm_name is not None: + result['AlgorithmName'] = self.algorithm_name + if self.algorithm_provider is not None: + result['AlgorithmProvider'] = self.algorithm_provider + if 
self.display_name is not None: + result['DisplayName'] = self.display_name + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time if self.user_id is not None: result['UserId'] = self.user_id - if self.user_vpc is not None: - result['UserVpc'] = self.user_vpc.to_map() if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmId') is not None: - self.algorithm_id = m.get('AlgorithmId') - if m.get('AlgorithmName') is not None: - self.algorithm_name = m.get('AlgorithmName') - if m.get('AlgorithmProvider') is not None: - self.algorithm_provider = m.get('AlgorithmProvider') - if m.get('AlgorithmSpec') is not None: - temp_model = AlgorithmSpec() - self.algorithm_spec = temp_model.from_map(m['AlgorithmSpec']) - if m.get('AlgorithmVersion') is not None: - self.algorithm_version = m.get('AlgorithmVersion') - if m.get('ComputeResource') is not None: - temp_model = GetTrainingJobResponseBodyComputeResource() - self.compute_resource = temp_model.from_map(m['ComputeResource']) - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - self.hyper_parameters = [] - if m.get('HyperParameters') is not None: - for k in m.get('HyperParameters'): - temp_model = GetTrainingJobResponseBodyHyperParameters() - self.hyper_parameters.append(temp_model.from_map(k)) - self.input_channels = [] - if m.get('InputChannels') is not None: - for k in m.get('InputChannels'): - temp_model = GetTrainingJobResponseBodyInputChannels() - self.input_channels.append(temp_model.from_map(k)) - self.instances = [] - if m.get('Instances') is not None: - for k in m.get('Instances'): - temp_model = GetTrainingJobResponseBodyInstances() - 
self.instances.append(temp_model.from_map(k)) - if m.get('IsTempAlgo') is not None: - self.is_temp_algo = m.get('IsTempAlgo') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = GetTrainingJobResponseBodyLabels() - self.labels.append(temp_model.from_map(k)) - self.latest_metrics = [] - if m.get('LatestMetrics') is not None: - for k in m.get('LatestMetrics'): - temp_model = GetTrainingJobResponseBodyLatestMetrics() - self.latest_metrics.append(temp_model.from_map(k)) - if m.get('LatestProgress') is not None: - temp_model = GetTrainingJobResponseBodyLatestProgress() - self.latest_progress = temp_model.from_map(m['LatestProgress']) - self.output_channels = [] - if m.get('OutputChannels') is not None: - for k in m.get('OutputChannels'): - temp_model = GetTrainingJobResponseBodyOutputChannels() - self.output_channels.append(temp_model.from_map(k)) - if m.get('ReasonCode') is not None: - self.reason_code = m.get('ReasonCode') - if m.get('ReasonMessage') is not None: - self.reason_message = m.get('ReasonMessage') - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('RoleArn') is not None: - self.role_arn = m.get('RoleArn') - if m.get('Scheduler') is not None: - temp_model = GetTrainingJobResponseBodyScheduler() - self.scheduler = temp_model.from_map(m['Scheduler']) - if m.get('Status') is not None: - self.status = m.get('Status') - self.status_transitions = [] - if m.get('StatusTransitions') is not None: - for k in m.get('StatusTransitions'): - temp_model = GetTrainingJobResponseBodyStatusTransitions() - self.status_transitions.append(temp_model.from_map(k)) - if m.get('TrainingJobDescription') is not None: - self.training_job_description = m.get('TrainingJobDescription') - if m.get('TrainingJobId') is not None: - self.training_job_id = m.get('TrainingJobId') - if m.get('TrainingJobName') is not None: - self.training_job_name = m.get('TrainingJobName') - if m.get('TrainingJobUrl') is not None: - 
self.training_job_url = m.get('TrainingJobUrl') + if m.get('AlgorithmDescription') is not None: + self.algorithm_description = m.get('AlgorithmDescription') + if m.get('AlgorithmId') is not None: + self.algorithm_id = m.get('AlgorithmId') + if m.get('AlgorithmName') is not None: + self.algorithm_name = m.get('AlgorithmName') + if m.get('AlgorithmProvider') is not None: + self.algorithm_provider = m.get('AlgorithmProvider') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') + return self + + +class ListAlgorithmsResponseBody(TeaModel): + def __init__( + self, + algorithms: List[ListAlgorithmsResponseBodyAlgorithms] = None, + request_id: str = None, + total_count: int = None, + ): + self.algorithms = algorithms + self.request_id = request_id + self.total_count = total_count + + def validate(self): + if self.algorithms: + for k in self.algorithms: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Algorithms'] = [] + if self.algorithms is not None: + for k in self.algorithms: + result['Algorithms'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.algorithms = [] + if m.get('Algorithms') is not None: + for k in m.get('Algorithms'): + temp_model = ListAlgorithmsResponseBodyAlgorithms() + self.algorithms.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if 
m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + return self + + +class ListAlgorithmsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListAlgorithmsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListAlgorithmsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListComponentVersionSnapshotsRequest(TeaModel): + def __init__( + self, + component_id: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + snapshot_id: str = None, + sort_by: str = None, + version: str = None, + ): + # This parameter is required. + self.component_id = component_id + self.order = order + self.page_number = page_number + self.page_size = page_size + self.snapshot_id = snapshot_id + self.sort_by = sort_by + # This parameter is required. 
+ self.version = version + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.component_id is not None: + result['ComponentId'] = self.component_id + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.version is not None: + result['Version'] = self.version + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ComponentId') is not None: + self.component_id = m.get('ComponentId') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SnapshotId') is not None: + self.snapshot_id = m.get('SnapshotId') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Version') is not None: + self.version = m.get('Version') + return self + + +class ListComponentVersionSnapshotsResponseBodySnapshots(TeaModel): + def __init__( + self, + component_id: str = None, + description: str = None, + is_current_version: bool = None, + snapshot_id: str = None, + tenant_id: str = None, + user_id: str = None, + version: str = None, + workspace_id: str = None, + ): + self.component_id = component_id + self.description = description + self.is_current_version = is_current_version + self.snapshot_id = snapshot_id + self.tenant_id = tenant_id + self.user_id = user_id + self.version = version + self.workspace_id = workspace_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.component_id is 
not None: + result['ComponentId'] = self.component_id + if self.description is not None: + result['Description'] = self.description + if self.is_current_version is not None: + result['IsCurrentVersion'] = self.is_current_version + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.tenant_id is not None: + result['TenantId'] = self.tenant_id + if self.user_id is not None: + result['UserId'] = self.user_id + if self.version is not None: + result['Version'] = self.version + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ComponentId') is not None: + self.component_id = m.get('ComponentId') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('IsCurrentVersion') is not None: + self.is_current_version = m.get('IsCurrentVersion') + if m.get('SnapshotId') is not None: + self.snapshot_id = m.get('SnapshotId') + if m.get('TenantId') is not None: + self.tenant_id = m.get('TenantId') if m.get('UserId') is not None: self.user_id = m.get('UserId') - if m.get('UserVpc') is not None: - temp_model = GetTrainingJobResponseBodyUserVpc() - self.user_vpc = temp_model.from_map(m['UserVpc']) + if m.get('Version') is not None: + self.version = m.get('Version') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class GetTrainingJobResponse(TeaModel): +class ListComponentVersionSnapshotsResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + snapshots: List[ListComponentVersionSnapshotsResponseBodySnapshots] = None, + total_count: int = None, + ): + self.request_id = request_id + self.snapshots = snapshots + self.total_count = total_count + + def validate(self): + if self.snapshots: + for k in self.snapshots: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if 
self.request_id is not None: + result['RequestId'] = self.request_id + result['Snapshots'] = [] + if self.snapshots is not None: + for k in self.snapshots: + result['Snapshots'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + self.snapshots = [] + if m.get('Snapshots') is not None: + for k in m.get('Snapshots'): + temp_model = ListComponentVersionSnapshotsResponseBodySnapshots() + self.snapshots.append(temp_model.from_map(k)) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + return self + + +class ListComponentVersionSnapshotsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetTrainingJobResponseBody = None, + body: ListComponentVersionSnapshotsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -10609,19 +15313,27 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetTrainingJobResponseBody() + temp_model = ListComponentVersionSnapshotsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetTrainingJobLatestMetricsRequest(TeaModel): +class ListComponentVersionsRequest(TeaModel): def __init__( self, - names: str = None, - token: str = None, + labels: Dict[str, str] = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + version: str = None, ): - self.names = names - self.token = token + self.labels = labels + self.order = order + 
self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + self.version = version def validate(self): pass @@ -10632,34 +15344,128 @@ def to_map(self): return _map result = dict() - if self.names is not None: - result['Names'] = self.names - if self.token is not None: - result['Token'] = self.token + if self.labels is not None: + result['Labels'] = self.labels + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.version is not None: + result['Version'] = self.version return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Names') is not None: - self.names = m.get('Names') - if m.get('Token') is not None: - self.token = m.get('Token') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Version') is not None: + self.version = m.get('Version') + return self + + +class ListComponentVersionsShrinkRequest(TeaModel): + def __init__( + self, + labels_shrink: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + sort_by: str = None, + version: str = None, + ): + self.labels_shrink = labels_shrink + self.order = order + self.page_number = page_number + self.page_size = page_size + self.sort_by = sort_by + self.version = version + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.labels_shrink is not None: + result['Labels'] = self.labels_shrink + if self.order is not None: 
+ result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.version is not None: + result['Version'] = self.version + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Labels') is not None: + self.labels_shrink = m.get('Labels') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Version') is not None: + self.version = m.get('Version') return self -class GetTrainingJobLatestMetricsResponseBodyMetrics(TeaModel): +class ListComponentVersionsResponseBodyComponentVersions(TeaModel): def __init__( self, + component_id: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + labels: List[Label] = None, name: str = None, - timestamp: str = None, - value: float = None, + provider: str = None, + status: str = None, + tenant_id: str = None, + user_id: str = None, + version: str = None, + workspace_id: str = None, ): + self.component_id = component_id + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_create_time = gmt_create_time + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_modified_time = gmt_modified_time + self.labels = labels self.name = name - self.timestamp = timestamp - self.value = value + self.provider = provider + self.status = status + self.tenant_id = tenant_id + self.user_id = user_id + self.version = version + self.workspace_id = workspace_id def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -10667,37 +15473,76 @@ def to_map(self): return _map result = dict() + if 
self.component_id is not None: + result['ComponentId'] = self.component_id + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) if self.name is not None: result['Name'] = self.name - if self.timestamp is not None: - result['Timestamp'] = self.timestamp - if self.value is not None: - result['Value'] = self.value + if self.provider is not None: + result['Provider'] = self.provider + if self.status is not None: + result['Status'] = self.status + if self.tenant_id is not None: + result['TenantId'] = self.tenant_id + if self.user_id is not None: + result['UserId'] = self.user_id + if self.version is not None: + result['Version'] = self.version + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('ComponentId') is not None: + self.component_id = m.get('ComponentId') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) if m.get('Name') is not None: self.name = m.get('Name') - if m.get('Timestamp') is not None: - self.timestamp = m.get('Timestamp') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('TenantId') is not None: + self.tenant_id = m.get('TenantId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('Version') is not None: + self.version = 
m.get('Version') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class GetTrainingJobLatestMetricsResponseBody(TeaModel): +class ListComponentVersionsResponseBody(TeaModel): def __init__( self, - metrics: List[GetTrainingJobLatestMetricsResponseBodyMetrics] = None, + component_versions: List[ListComponentVersionsResponseBodyComponentVersions] = None, request_id: str = None, + total_count: int = None, ): - self.metrics = metrics + self.component_versions = component_versions self.request_id = request_id + self.total_count = total_count def validate(self): - if self.metrics: - for k in self.metrics: + if self.component_versions: + for k in self.component_versions: if k: k.validate() @@ -10707,41 +15552,42 @@ def to_map(self): return _map result = dict() - result['Metrics'] = [] - if self.metrics is not None: - for k in self.metrics: - result['Metrics'].append(k.to_map() if k else None) + result['ComponentVersions'] = [] + if self.component_versions is not None: + for k in self.component_versions: + result['ComponentVersions'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - self.metrics = [] - if m.get('Metrics') is not None: - for k in m.get('Metrics'): - temp_model = GetTrainingJobLatestMetricsResponseBodyMetrics() - self.metrics.append(temp_model.from_map(k)) + self.component_versions = [] + if m.get('ComponentVersions') is not None: + for k in m.get('ComponentVersions'): + temp_model = ListComponentVersionsResponseBodyComponentVersions() + self.component_versions.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class GetTrainingJobLatestMetricsResponse(TeaModel): +class 
ListComponentVersionsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: GetTrainingJobLatestMetricsResponseBody = None, + body: ListComponentVersionsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -10766,28 +15612,34 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = GetTrainingJobLatestMetricsResponseBody() + temp_model = ListComponentVersionsResponseBody() self.body = temp_model.from_map(m['body']) return self -class GetUserViewMetricsRequest(TeaModel): +class ListComponentsRequest(TeaModel): def __init__( self, + component_id: str = None, + component_ids: str = None, + labels: Dict[str, Any] = None, + name: str = None, order: str = None, - page_number: str = None, - page_size: str = None, + page_number: int = None, + page_size: int = None, + provider: str = None, sort_by: str = None, - time_step: str = None, - user_id: str = None, workspace_id: str = None, ): + self.component_id = component_id + self.component_ids = component_ids + self.labels = labels + self.name = name self.order = order self.page_number = page_number self.page_size = page_size + self.provider = provider self.sort_by = sort_by - self.time_step = time_step - self.user_id = user_id self.workspace_id = workspace_id def validate(self): @@ -10799,114 +15651,80 @@ def to_map(self): return _map result = dict() + if self.component_id is not None: + result['ComponentId'] = self.component_id + if self.component_ids is not None: + result['ComponentIds'] = self.component_ids + if self.labels is not None: + result['Labels'] = self.labels + if self.name is not None: + result['Name'] = self.name 
if self.order is not None: result['Order'] = self.order if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size + if self.provider is not None: + result['Provider'] = self.provider if self.sort_by is not None: result['SortBy'] = self.sort_by - if self.time_step is not None: - result['TimeStep'] = self.time_step - if self.user_id is not None: - result['UserId'] = self.user_id if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('ComponentId') is not None: + self.component_id = m.get('ComponentId') + if m.get('ComponentIds') is not None: + self.component_ids = m.get('ComponentIds') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('Name') is not None: + self.name = m.get('Name') if m.get('Order') is not None: self.order = m.get('Order') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: self.page_size = m.get('PageSize') + if m.get('Provider') is not None: + self.provider = m.get('Provider') if m.get('SortBy') is not None: self.sort_by = m.get('SortBy') - if m.get('TimeStep') is not None: - self.time_step = m.get('TimeStep') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class GetUserViewMetricsResponseBody(TeaModel): - def __init__( - self, - resource_group_id: str = None, - summary: UserViewMetric = None, - total: int = None, - user_metrics: List[UserViewMetric] = None, - ): - self.resource_group_id = resource_group_id - self.summary = summary - self.total = total - self.user_metrics = user_metrics - - def validate(self): - if self.summary: - self.summary.validate() - if self.user_metrics: - for k in self.user_metrics: - if k: - k.validate() - - def to_map(self): - _map = super().to_map() - 
if _map is not None: - return _map - - result = dict() - if self.resource_group_id is not None: - result['ResourceGroupId'] = self.resource_group_id - if self.summary is not None: - result['Summary'] = self.summary.to_map() - if self.total is not None: - result['Total'] = self.total - result['UserMetrics'] = [] - if self.user_metrics is not None: - for k in self.user_metrics: - result['UserMetrics'].append(k.to_map() if k else None) - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('ResourceGroupId') is not None: - self.resource_group_id = m.get('ResourceGroupId') - if m.get('Summary') is not None: - temp_model = UserViewMetric() - self.summary = temp_model.from_map(m['Summary']) - if m.get('Total') is not None: - self.total = m.get('Total') - self.user_metrics = [] - if m.get('UserMetrics') is not None: - for k in m.get('UserMetrics'): - temp_model = UserViewMetric() - self.user_metrics.append(temp_model.from_map(k)) - return self - - -class GetUserViewMetricsResponse(TeaModel): +class ListComponentsShrinkRequest(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: GetUserViewMetricsResponseBody = None, + component_id: str = None, + component_ids: str = None, + labels_shrink: str = None, + name: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + provider: str = None, + sort_by: str = None, + workspace_id: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.component_id = component_id + self.component_ids = component_ids + self.labels_shrink = labels_shrink + self.name = name + self.order = order + self.page_number = page_number + self.page_size = page_size + self.provider = provider + self.sort_by = sort_by + self.workspace_id = workspace_id def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 
'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -10914,34 +15732,66 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.component_id is not None: + result['ComponentId'] = self.component_id + if self.component_ids is not None: + result['ComponentIds'] = self.component_ids + if self.labels_shrink is not None: + result['Labels'] = self.labels_shrink + if self.name is not None: + result['Name'] = self.name + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.provider is not None: + result['Provider'] = self.provider + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = GetUserViewMetricsResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('ComponentId') is not None: + self.component_id = m.get('ComponentId') + if m.get('ComponentIds') is not None: + self.component_ids = m.get('ComponentIds') + if m.get('Labels') is not None: + self.labels_shrink = m.get('Labels') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('Provider') is not None: + 
self.provider = m.get('Provider') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class ListAI4DSerivcesRequest(TeaModel): +class ListComponentsResponseBodyComponentsVersions(TeaModel): def __init__( self, - service_type: str = None, - workspace_id: str = None, + gmt_create_time: str = None, + snapshot_id: str = None, + status: str = None, + version: str = None, ): - self.service_type = service_type - self.workspace_id = workspace_id + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_create_time = gmt_create_time + self.snapshot_id = snapshot_id + self.status = status + self.version = version def validate(self): pass @@ -10952,32 +15802,69 @@ def to_map(self): return _map result = dict() - if self.service_type is not None: - result['ServiceType'] = self.service_type - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.snapshot_id is not None: + result['SnapshotId'] = self.snapshot_id + if self.status is not None: + result['Status'] = self.status + if self.version is not None: + result['Version'] = self.version return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ServiceType') is not None: - self.service_type = m.get('ServiceType') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('SnapshotId') is not None: + self.snapshot_id = m.get('SnapshotId') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('Version') is not None: + self.version = m.get('Version') return self -class ListAI4DSerivcesResponseBodyServices(TeaModel): +class ListComponentsResponseBodyComponents(TeaModel): def __init__( self, - service_name: str = None, - service_type: str = 
None, + component_id: str = None, + description: str = None, + display_name: str = None, + gmt_create_time: str = None, + gmt_modified_time: str = None, + labels: List[Label] = None, + name: str = None, + provider: str = None, + tenant_id: str = None, + user_id: str = None, + versions: List[ListComponentsResponseBodyComponentsVersions] = None, + workspace_id: str = None, ): - self.service_name = service_name - self.service_type = service_type + self.component_id = component_id + self.description = description + self.display_name = display_name + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_create_time = gmt_create_time + # Use the UTC time format: yyyy-MM-ddTHH:mmZ + self.gmt_modified_time = gmt_modified_time + self.labels = labels + self.name = name + self.provider = provider + self.tenant_id = tenant_id + self.user_id = user_id + self.versions = versions + self.workspace_id = workspace_id def validate(self): - pass + if self.labels: + for k in self.labels: + if k: + k.validate() + if self.versions: + for k in self.versions: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -10985,33 +15872,85 @@ def to_map(self): return _map result = dict() - if self.service_name is not None: - result['ServiceName'] = self.service_name - if self.service_type is not None: - result['ServiceType'] = self.service_type + if self.component_id is not None: + result['ComponentId'] = self.component_id + if self.description is not None: + result['Description'] = self.description + if self.display_name is not None: + result['DisplayName'] = self.display_name + if self.gmt_create_time is not None: + result['GmtCreateTime'] = self.gmt_create_time + if self.gmt_modified_time is not None: + result['GmtModifiedTime'] = self.gmt_modified_time + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.name is not None: + result['Name'] = self.name + if self.provider is not None: + 
result['Provider'] = self.provider + if self.tenant_id is not None: + result['TenantId'] = self.tenant_id + if self.user_id is not None: + result['UserId'] = self.user_id + result['Versions'] = [] + if self.versions is not None: + for k in self.versions: + result['Versions'].append(k.to_map() if k else None) + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ServiceName') is not None: - self.service_name = m.get('ServiceName') - if m.get('ServiceType') is not None: - self.service_type = m.get('ServiceType') + if m.get('ComponentId') is not None: + self.component_id = m.get('ComponentId') + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('DisplayName') is not None: + self.display_name = m.get('DisplayName') + if m.get('GmtCreateTime') is not None: + self.gmt_create_time = m.get('GmtCreateTime') + if m.get('GmtModifiedTime') is not None: + self.gmt_modified_time = m.get('GmtModifiedTime') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = Label() + self.labels.append(temp_model.from_map(k)) + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Provider') is not None: + self.provider = m.get('Provider') + if m.get('TenantId') is not None: + self.tenant_id = m.get('TenantId') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + self.versions = [] + if m.get('Versions') is not None: + for k in m.get('Versions'): + temp_model = ListComponentsResponseBodyComponentsVersions() + self.versions.append(temp_model.from_map(k)) + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class ListAI4DSerivcesResponseBody(TeaModel): +class ListComponentsResponseBody(TeaModel): def __init__( self, + components: List[ListComponentsResponseBodyComponents] = None, request_id: str = None, - services: 
List[ListAI4DSerivcesResponseBodyServices] = None, + total_count: int = None, ): + self.components = components self.request_id = request_id - self.services = services + self.total_count = total_count def validate(self): - if self.services: - for k in self.services: + if self.components: + for k in self.components: if k: k.validate() @@ -11021,41 +15960,42 @@ def to_map(self): return _map result = dict() + result['Components'] = [] + if self.components is not None: + for k in self.components: + result['Components'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - result['Services'] = [] - if self.services is not None: - for k in self.services: - result['Services'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() + self.components = [] + if m.get('Components') is not None: + for k in m.get('Components'): + temp_model = ListComponentsResponseBodyComponents() + self.components.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - self.services = [] - if m.get('Services') is not None: - for k in m.get('Services'): - temp_model = ListAI4DSerivcesResponseBodyServices() - self.services.append(temp_model.from_map(k)) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class ListAI4DSerivcesResponse(TeaModel): +class ListComponentsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListAI4DSerivcesResponseBody = None, + body: ListComponentsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11080,19 
+16020,27 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListAI4DSerivcesResponseBody() + temp_model = ListComponentsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListAlgorithmVersionsRequest(TeaModel): +class ListInstanceJobsRequest(TeaModel): def __init__( self, + instance_job_type: str = None, + order: str = None, page_number: int = None, page_size: int = None, + sort_by: str = None, + status: str = None, ): + self.instance_job_type = instance_job_type + self.order = order self.page_number = page_number self.page_size = page_size + self.sort_by = sort_by + self.status = status def validate(self): pass @@ -11103,41 +16051,60 @@ def to_map(self): return _map result = dict() + if self.instance_job_type is not None: + result['InstanceJobType'] = self.instance_job_type + if self.order is not None: + result['Order'] = self.order if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.status is not None: + result['Status'] = self.status return result def from_map(self, m: dict = None): m = m or dict() + if m.get('InstanceJobType') is not None: + self.instance_job_type = m.get('InstanceJobType') + if m.get('Order') is not None: + self.order = m.get('Order') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: self.page_size = m.get('PageSize') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Status') is not None: + self.status = m.get('Status') return self -class ListAlgorithmVersionsResponseBodyAlgorithmVersions(TeaModel): +class ListInstanceJobsResponseBodyInstanceJobs(TeaModel): def __init__( self, - algorithm_id: str = None, - algorithm_name: str = None, - algorithm_provider: str = 
None, - algorithm_version: str = None, + creator: str = None, gmt_create_time: str = None, - gmt_modified_time: str = None, - tenant_id: str = None, - user_id: str = None, + instance_id: str = None, + instance_job_id: str = None, + instance_job_type: str = None, + reason_code: str = None, + reason_message: str = None, + status: str = None, + workspace_id: str = None, ): - self.algorithm_id = algorithm_id - self.algorithm_name = algorithm_name - self.algorithm_provider = algorithm_provider - self.algorithm_version = algorithm_version + self.creator = creator + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.tenant_id = tenant_id - self.user_id = user_id + self.instance_id = instance_id + self.instance_job_id = instance_job_id + self.instance_job_type = instance_job_type + self.reason_code = reason_code + self.reason_message = reason_message + self.status = status + self.workspace_id = workspace_id def validate(self): pass @@ -11148,61 +16115,63 @@ def to_map(self): return _map result = dict() - if self.algorithm_id is not None: - result['AlgorithmId'] = self.algorithm_id - if self.algorithm_name is not None: - result['AlgorithmName'] = self.algorithm_name - if self.algorithm_provider is not None: - result['AlgorithmProvider'] = self.algorithm_provider - if self.algorithm_version is not None: - result['AlgorithmVersion'] = self.algorithm_version + if self.creator is not None: + result['Creator'] = self.creator if self.gmt_create_time is not None: result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.tenant_id is not None: - result['TenantId'] = self.tenant_id - if self.user_id is not None: - result['UserId'] = self.user_id + if self.instance_id is not None: + result['InstanceId'] = self.instance_id + if self.instance_job_id is not None: + result['InstanceJobId'] = self.instance_job_id 
+ if self.instance_job_type is not None: + result['InstanceJobType'] = self.instance_job_type + if self.reason_code is not None: + result['ReasonCode'] = self.reason_code + if self.reason_message is not None: + result['ReasonMessage'] = self.reason_message + if self.status is not None: + result['Status'] = self.status + if self.workspace_id is not None: + result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmId') is not None: - self.algorithm_id = m.get('AlgorithmId') - if m.get('AlgorithmName') is not None: - self.algorithm_name = m.get('AlgorithmName') - if m.get('AlgorithmProvider') is not None: - self.algorithm_provider = m.get('AlgorithmProvider') - if m.get('AlgorithmVersion') is not None: - self.algorithm_version = m.get('AlgorithmVersion') + if m.get('Creator') is not None: + self.creator = m.get('Creator') if m.get('GmtCreateTime') is not None: self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('TenantId') is not None: - self.tenant_id = m.get('TenantId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') + if m.get('InstanceJobId') is not None: + self.instance_job_id = m.get('InstanceJobId') + if m.get('InstanceJobType') is not None: + self.instance_job_type = m.get('InstanceJobType') + if m.get('ReasonCode') is not None: + self.reason_code = m.get('ReasonCode') + if m.get('ReasonMessage') is not None: + self.reason_message = m.get('ReasonMessage') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('WorkspaceId') is not None: + self.workspace_id = m.get('WorkspaceId') return self -class ListAlgorithmVersionsResponseBody(TeaModel): +class ListInstanceJobsResponseBody(TeaModel): def __init__( self, - algorithm_versions: 
List[ListAlgorithmVersionsResponseBodyAlgorithmVersions] = None, + instance_jobs: ListInstanceJobsResponseBodyInstanceJobs = None, request_id: str = None, total_count: int = None, ): - self.algorithm_versions = algorithm_versions + self.instance_jobs = instance_jobs self.request_id = request_id self.total_count = total_count def validate(self): - if self.algorithm_versions: - for k in self.algorithm_versions: - if k: - k.validate() + if self.instance_jobs: + self.instance_jobs.validate() def to_map(self): _map = super().to_map() @@ -11210,10 +16179,8 @@ def to_map(self): return _map result = dict() - result['AlgorithmVersions'] = [] - if self.algorithm_versions is not None: - for k in self.algorithm_versions: - result['AlgorithmVersions'].append(k.to_map() if k else None) + if self.instance_jobs is not None: + result['InstanceJobs'] = self.instance_jobs.to_map() if self.request_id is not None: result['RequestId'] = self.request_id if self.total_count is not None: @@ -11222,11 +16189,9 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - self.algorithm_versions = [] - if m.get('AlgorithmVersions') is not None: - for k in m.get('AlgorithmVersions'): - temp_model = ListAlgorithmVersionsResponseBodyAlgorithmVersions() - self.algorithm_versions.append(temp_model.from_map(k)) + if m.get('InstanceJobs') is not None: + temp_model = ListInstanceJobsResponseBodyInstanceJobs() + self.instance_jobs = temp_model.from_map(m['InstanceJobs']) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') if m.get('TotalCount') is not None: @@ -11234,21 +16199,18 @@ def from_map(self, m: dict = None): return self -class ListAlgorithmVersionsResponse(TeaModel): +class ListInstanceJobsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListAlgorithmVersionsResponseBody = None, + body: ListInstanceJobsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def 
validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11273,27 +16235,26 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListAlgorithmVersionsResponseBody() + temp_model = ListInstanceJobsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListAlgorithmsRequest(TeaModel): +class ListNodeGPUMetricsRequest(TeaModel): def __init__( self, - algorithm_id: str = None, - algorithm_name: str = None, - algorithm_provider: str = None, - page_number: int = None, - page_size: int = None, - workspace_id: str = None, + end_time: str = None, + gputype: str = None, + metric_type: str = None, + node_type: str = None, + start_time: str = None, ): - self.algorithm_id = algorithm_id - self.algorithm_name = algorithm_name - self.algorithm_provider = algorithm_provider - self.page_number = page_number - self.page_size = page_size - self.workspace_id = workspace_id + self.end_time = end_time + self.gputype = gputype + # This parameter is required. 
+ self.metric_type = metric_type + self.node_type = node_type + self.start_time = start_time def validate(self): pass @@ -11304,59 +16265,128 @@ def to_map(self): return _map result = dict() - if self.algorithm_id is not None: - result['AlgorithmId'] = self.algorithm_id - if self.algorithm_name is not None: - result['AlgorithmName'] = self.algorithm_name - if self.algorithm_provider is not None: - result['AlgorithmProvider'] = self.algorithm_provider - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.end_time is not None: + result['EndTime'] = self.end_time + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.metric_type is not None: + result['MetricType'] = self.metric_type + if self.node_type is not None: + result['NodeType'] = self.node_type + if self.start_time is not None: + result['StartTime'] = self.start_time return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmId') is not None: - self.algorithm_id = m.get('AlgorithmId') - if m.get('AlgorithmName') is not None: - self.algorithm_name = m.get('AlgorithmName') - if m.get('AlgorithmProvider') is not None: - self.algorithm_provider = m.get('AlgorithmProvider') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('EndTime') is not None: + self.end_time = m.get('EndTime') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('MetricType') is not None: + self.metric_type = m.get('MetricType') + if m.get('NodeType') is not None: + self.node_type = m.get('NodeType') + if m.get('StartTime') is not None: + self.start_time = m.get('StartTime') + return self + + 
+class ListNodeGPUMetricsResponseBody(TeaModel): + def __init__( + self, + metric_type: str = None, + node_gpumetrics: List[NodeGPUMetric] = None, + quota_id: str = None, + ): + self.metric_type = metric_type + self.node_gpumetrics = node_gpumetrics + self.quota_id = quota_id + + def validate(self): + if self.node_gpumetrics: + for k in self.node_gpumetrics: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.metric_type is not None: + result['MetricType'] = self.metric_type + result['NodeGPUMetrics'] = [] + if self.node_gpumetrics is not None: + for k in self.node_gpumetrics: + result['NodeGPUMetrics'].append(k.to_map() if k else None) + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('MetricType') is not None: + self.metric_type = m.get('MetricType') + self.node_gpumetrics = [] + if m.get('NodeGPUMetrics') is not None: + for k in m.get('NodeGPUMetrics'): + temp_model = NodeGPUMetric() + self.node_gpumetrics.append(temp_model.from_map(k)) + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + return self + + +class ListNodeGPUMetricsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListNodeGPUMetricsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = 
m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListNodeGPUMetricsResponseBody() + self.body = temp_model.from_map(m['body']) return self -class ListAlgorithmsResponseBodyAlgorithms(TeaModel): +class ListNodePodsRequest(TeaModel): def __init__( self, - algorithm_description: str = None, - algorithm_id: str = None, - algorithm_name: str = None, - algorithm_provider: str = None, - display_name: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - user_id: str = None, - workspace_id: str = None, + resource_group_id: str = None, ): - self.algorithm_description = algorithm_description - self.algorithm_id = algorithm_id - self.algorithm_name = algorithm_name - self.algorithm_provider = algorithm_provider - self.display_name = display_name - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.user_id = user_id - self.workspace_id = workspace_id + # This parameter is required. 
+ self.resource_group_id = resource_group_id def validate(self): pass @@ -11367,63 +16397,29 @@ def to_map(self): return _map result = dict() - if self.algorithm_description is not None: - result['AlgorithmDescription'] = self.algorithm_description - if self.algorithm_id is not None: - result['AlgorithmId'] = self.algorithm_id - if self.algorithm_name is not None: - result['AlgorithmName'] = self.algorithm_name - if self.algorithm_provider is not None: - result['AlgorithmProvider'] = self.algorithm_provider - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - if self.user_id is not None: - result['UserId'] = self.user_id - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.resource_group_id is not None: + result['ResourceGroupId'] = self.resource_group_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AlgorithmDescription') is not None: - self.algorithm_description = m.get('AlgorithmDescription') - if m.get('AlgorithmId') is not None: - self.algorithm_id = m.get('AlgorithmId') - if m.get('AlgorithmName') is not None: - self.algorithm_name = m.get('AlgorithmName') - if m.get('AlgorithmProvider') is not None: - self.algorithm_provider = m.get('AlgorithmProvider') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('ResourceGroupId') is not None: + self.resource_group_id = m.get('ResourceGroupId') return self -class 
ListAlgorithmsResponseBody(TeaModel): +class ListNodePodsResponseBody(TeaModel): def __init__( self, - algorithms: List[ListAlgorithmsResponseBodyAlgorithms] = None, + node_pod_infos: List[NodePodInfo] = None, request_id: str = None, - total_count: int = None, ): - self.algorithms = algorithms + self.node_pod_infos = node_pod_infos self.request_id = request_id - self.total_count = total_count def validate(self): - if self.algorithms: - for k in self.algorithms: + if self.node_pod_infos: + for k in self.node_pod_infos: if k: k.validate() @@ -11433,45 +16429,38 @@ def to_map(self): return _map result = dict() - result['Algorithms'] = [] - if self.algorithms is not None: - for k in self.algorithms: - result['Algorithms'].append(k.to_map() if k else None) + result['NodePodInfos'] = [] + if self.node_pod_infos is not None: + for k in self.node_pod_infos: + result['NodePodInfos'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - self.algorithms = [] - if m.get('Algorithms') is not None: - for k in m.get('Algorithms'): - temp_model = ListAlgorithmsResponseBodyAlgorithms() - self.algorithms.append(temp_model.from_map(k)) + self.node_pod_infos = [] + if m.get('NodePodInfos') is not None: + for k in m.get('NodePodInfos'): + temp_model = NodePodInfo() + self.node_pod_infos.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') return self -class ListAlgorithmsResponse(TeaModel): +class ListNodePodsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListAlgorithmsResponseBody = None, + body: ListNodePodsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def 
validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11496,94 +16485,25 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListAlgorithmsResponseBody() + temp_model = ListNodePodsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListComponentVersionSnapshotsRequest(TeaModel): - def __init__( - self, - component_id: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, - snapshot_id: str = None, - sort_by: str = None, - version: str = None, - ): - self.component_id = component_id - self.order = order - self.page_number = page_number - self.page_size = page_size - self.snapshot_id = snapshot_id - self.sort_by = sort_by - self.version = version - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.component_id is not None: - result['ComponentId'] = self.component_id - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.snapshot_id is not None: - result['SnapshotId'] = self.snapshot_id - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.version is not None: - result['Version'] = self.version - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('ComponentId') is not None: - self.component_id = m.get('ComponentId') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SnapshotId') 
is not None: - self.snapshot_id = m.get('SnapshotId') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('Version') is not None: - self.version = m.get('Version') - return self - - -class ListComponentVersionSnapshotsResponseBodySnapshots(TeaModel): +class ListNodeTypesRequest(TeaModel): def __init__( self, - component_id: str = None, - description: str = None, - is_current_version: bool = None, - snapshot_id: str = None, - tenant_id: str = None, - user_id: str = None, - version: str = None, - workspace_id: str = None, + accelerator_type: str = None, + gputype: str = None, + node_types: str = None, + quota_id: str = None, + resource_group_ids: str = None, ): - self.component_id = component_id - self.description = description - self.is_current_version = is_current_version - self.snapshot_id = snapshot_id - self.tenant_id = tenant_id - self.user_id = user_id - self.version = version - self.workspace_id = workspace_id + self.accelerator_type = accelerator_type + self.gputype = gputype + self.node_types = node_types + self.quota_id = quota_id + self.resource_group_ids = resource_group_ids def validate(self): pass @@ -11594,59 +16514,51 @@ def to_map(self): return _map result = dict() - if self.component_id is not None: - result['ComponentId'] = self.component_id - if self.description is not None: - result['Description'] = self.description - if self.is_current_version is not None: - result['IsCurrentVersion'] = self.is_current_version - if self.snapshot_id is not None: - result['SnapshotId'] = self.snapshot_id - if self.tenant_id is not None: - result['TenantId'] = self.tenant_id - if self.user_id is not None: - result['UserId'] = self.user_id - if self.version is not None: - result['Version'] = self.version - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.accelerator_type is not None: + result['AcceleratorType'] = self.accelerator_type + if self.gputype is not None: + result['GPUType'] = self.gputype + 
if self.node_types is not None: + result['NodeTypes'] = self.node_types + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.resource_group_ids is not None: + result['ResourceGroupIds'] = self.resource_group_ids return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ComponentId') is not None: - self.component_id = m.get('ComponentId') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('IsCurrentVersion') is not None: - self.is_current_version = m.get('IsCurrentVersion') - if m.get('SnapshotId') is not None: - self.snapshot_id = m.get('SnapshotId') - if m.get('TenantId') is not None: - self.tenant_id = m.get('TenantId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('Version') is not None: - self.version = m.get('Version') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('AcceleratorType') is not None: + self.accelerator_type = m.get('AcceleratorType') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('NodeTypes') is not None: + self.node_types = m.get('NodeTypes') + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('ResourceGroupIds') is not None: + self.resource_group_ids = m.get('ResourceGroupIds') return self -class ListComponentVersionSnapshotsResponseBody(TeaModel): +class ListNodeTypesResponseBody(TeaModel): def __init__( self, + node_types: List[NodeType] = None, request_id: str = None, - snapshots: List[ListComponentVersionSnapshotsResponseBodySnapshots] = None, - total_count: int = None, + statistics: List[NodeTypeStatistic] = None, ): + self.node_types = node_types self.request_id = request_id - self.snapshots = snapshots - self.total_count = total_count + self.statistics = statistics - def validate(self): - if self.snapshots: - for k in self.snapshots: + def validate(self): + if self.node_types: + for k in self.node_types: + if k: + 
k.validate() + if self.statistics: + for k in self.statistics: if k: k.validate() @@ -11656,45 +16568,47 @@ def to_map(self): return _map result = dict() + result['NodeTypes'] = [] + if self.node_types is not None: + for k in self.node_types: + result['NodeTypes'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - result['Snapshots'] = [] - if self.snapshots is not None: - for k in self.snapshots: - result['Snapshots'].append(k.to_map() if k else None) - if self.total_count is not None: - result['TotalCount'] = self.total_count + result['Statistics'] = [] + if self.statistics is not None: + for k in self.statistics: + result['Statistics'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() + self.node_types = [] + if m.get('NodeTypes') is not None: + for k in m.get('NodeTypes'): + temp_model = NodeType() + self.node_types.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - self.snapshots = [] - if m.get('Snapshots') is not None: - for k in m.get('Snapshots'): - temp_model = ListComponentVersionSnapshotsResponseBodySnapshots() - self.snapshots.append(temp_model.from_map(k)) - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + self.statistics = [] + if m.get('Statistics') is not None: + for k in m.get('Statistics'): + temp_model = NodeTypeStatistic() + self.statistics.append(temp_model.from_map(k)) return self -class ListComponentVersionSnapshotsResponse(TeaModel): +class ListNodeTypesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListComponentVersionSnapshotsResponseBody = None, + body: ListNodeTypesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - 
self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -11719,84 +16633,45 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListComponentVersionSnapshotsResponseBody() + temp_model = ListNodeTypesResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListComponentVersionsRequest(TeaModel): - def __init__( - self, - labels: Dict[str, str] = None, - order: str = None, - page_number: int = None, - page_size: int = None, - sort_by: str = None, - version: str = None, - ): - self.labels = labels - self.order = order - self.page_number = page_number - self.page_size = page_size - self.sort_by = sort_by - self.version = version - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.labels is not None: - result['Labels'] = self.labels - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.version is not None: - result['Version'] = self.version - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Labels') is not None: - self.labels = m.get('Labels') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('Version') is not None: - self.version = m.get('Version') - return self - - -class ListComponentVersionsShrinkRequest(TeaModel): +class ListNodesRequest(TeaModel): def __init__( self, - labels_shrink: str = None, + accelerator_type: str = 
None, + filter_by_quota_id: str = None, + filter_by_resource_group_ids: str = None, + gputype: str = None, + node_names: str = None, + node_statuses: str = None, + node_types: str = None, order: str = None, + order_statuses: str = None, page_number: int = None, page_size: int = None, + quota_id: str = None, + resource_group_ids: str = None, sort_by: str = None, - version: str = None, + verbose: bool = None, ): - self.labels_shrink = labels_shrink + self.accelerator_type = accelerator_type + self.filter_by_quota_id = filter_by_quota_id + self.filter_by_resource_group_ids = filter_by_resource_group_ids + self.gputype = gputype + self.node_names = node_names + self.node_statuses = node_statuses + self.node_types = node_types self.order = order + self.order_statuses = order_statuses self.page_number = page_number self.page_size = page_size + self.quota_id = quota_id + self.resource_group_ids = resource_group_ids self.sort_by = sort_by - self.version = version + self.verbose = verbose def validate(self): pass @@ -11807,146 +16682,87 @@ def to_map(self): return _map result = dict() - if self.labels_shrink is not None: - result['Labels'] = self.labels_shrink + if self.accelerator_type is not None: + result['AcceleratorType'] = self.accelerator_type + if self.filter_by_quota_id is not None: + result['FilterByQuotaId'] = self.filter_by_quota_id + if self.filter_by_resource_group_ids is not None: + result['FilterByResourceGroupIds'] = self.filter_by_resource_group_ids + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.node_names is not None: + result['NodeNames'] = self.node_names + if self.node_statuses is not None: + result['NodeStatuses'] = self.node_statuses + if self.node_types is not None: + result['NodeTypes'] = self.node_types if self.order is not None: result['Order'] = self.order + if self.order_statuses is not None: + result['OrderStatuses'] = self.order_statuses if self.page_number is not None: result['PageNumber'] = self.page_number if 
self.page_size is not None: result['PageSize'] = self.page_size + if self.quota_id is not None: + result['QuotaId'] = self.quota_id + if self.resource_group_ids is not None: + result['ResourceGroupIds'] = self.resource_group_ids if self.sort_by is not None: result['SortBy'] = self.sort_by - if self.version is not None: - result['Version'] = self.version - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Labels') is not None: - self.labels_shrink = m.get('Labels') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('Version') is not None: - self.version = m.get('Version') - return self - - -class ListComponentVersionsResponseBodyComponentVersions(TeaModel): - def __init__( - self, - component_id: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - labels: List[Label] = None, - name: str = None, - provider: str = None, - status: str = None, - tenant_id: str = None, - user_id: str = None, - version: str = None, - workspace_id: str = None, - ): - self.component_id = component_id - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.labels = labels - self.name = name - self.provider = provider - self.status = status - self.tenant_id = tenant_id - self.user_id = user_id - self.version = version - self.workspace_id = workspace_id - - def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.component_id is not None: - result['ComponentId'] = self.component_id - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - 
result['GmtModifiedTime'] = self.gmt_modified_time - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.name is not None: - result['Name'] = self.name - if self.provider is not None: - result['Provider'] = self.provider - if self.status is not None: - result['Status'] = self.status - if self.tenant_id is not None: - result['TenantId'] = self.tenant_id - if self.user_id is not None: - result['UserId'] = self.user_id - if self.version is not None: - result['Version'] = self.version - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('ComponentId') is not None: - self.component_id = m.get('ComponentId') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Provider') is not None: - self.provider = m.get('Provider') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('TenantId') is not None: - self.tenant_id = m.get('TenantId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - if m.get('Version') is not None: - self.version = m.get('Version') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if self.verbose is not None: + result['Verbose'] = self.verbose + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('AcceleratorType') is not None: + self.accelerator_type = m.get('AcceleratorType') + if m.get('FilterByQuotaId') is not None: + self.filter_by_quota_id = m.get('FilterByQuotaId') + if m.get('FilterByResourceGroupIds') 
is not None: + self.filter_by_resource_group_ids = m.get('FilterByResourceGroupIds') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('NodeNames') is not None: + self.node_names = m.get('NodeNames') + if m.get('NodeStatuses') is not None: + self.node_statuses = m.get('NodeStatuses') + if m.get('NodeTypes') is not None: + self.node_types = m.get('NodeTypes') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('OrderStatuses') is not None: + self.order_statuses = m.get('OrderStatuses') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('ResourceGroupIds') is not None: + self.resource_group_ids = m.get('ResourceGroupIds') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') return self -class ListComponentVersionsResponseBody(TeaModel): +class ListNodesResponseBody(TeaModel): def __init__( self, - component_versions: List[ListComponentVersionsResponseBodyComponentVersions] = None, + nodes: List[Node] = None, request_id: str = None, total_count: int = None, ): - self.component_versions = component_versions + self.nodes = nodes self.request_id = request_id self.total_count = total_count def validate(self): - if self.component_versions: - for k in self.component_versions: + if self.nodes: + for k in self.nodes: if k: k.validate() @@ -11956,10 +16772,10 @@ def to_map(self): return _map result = dict() - result['ComponentVersions'] = [] - if self.component_versions is not None: - for k in self.component_versions: - result['ComponentVersions'].append(k.to_map() if k else None) + result['Nodes'] = [] + if self.nodes is not None: + for k in self.nodes: + result['Nodes'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = 
self.request_id if self.total_count is not None: @@ -11968,11 +16784,11 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - self.component_versions = [] - if m.get('ComponentVersions') is not None: - for k in m.get('ComponentVersions'): - temp_model = ListComponentVersionsResponseBodyComponentVersions() - self.component_versions.append(temp_model.from_map(k)) + self.nodes = [] + if m.get('Nodes') is not None: + for k in m.get('Nodes'): + temp_model = Node() + self.nodes.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') if m.get('TotalCount') is not None: @@ -11980,21 +16796,18 @@ def from_map(self, m: dict = None): return self -class ListComponentVersionsResponse(TeaModel): +class ListNodesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListComponentVersionsResponseBody = None, + body: ListNodesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12019,116 +16832,33 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListComponentVersionsResponseBody() + temp_model = ListNodesResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListComponentsRequest(TeaModel): - def __init__( - self, - component_id: str = None, - component_ids: str = None, - labels: Dict[str, Any] = None, - name: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, - provider: str = None, - sort_by: str = None, - workspace_id: str = None, - ): - self.component_id = component_id - self.component_ids = component_ids - self.labels = labels - self.name = name - 
self.order = order - self.page_number = page_number - self.page_size = page_size - self.provider = provider - self.sort_by = sort_by - self.workspace_id = workspace_id - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.component_id is not None: - result['ComponentId'] = self.component_id - if self.component_ids is not None: - result['ComponentIds'] = self.component_ids - if self.labels is not None: - result['Labels'] = self.labels - if self.name is not None: - result['Name'] = self.name - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.provider is not None: - result['Provider'] = self.provider - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('ComponentId') is not None: - self.component_id = m.get('ComponentId') - if m.get('ComponentIds') is not None: - self.component_ids = m.get('ComponentIds') - if m.get('Labels') is not None: - self.labels = m.get('Labels') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('Provider') is not None: - self.provider = m.get('Provider') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') - return self - - -class ListComponentsShrinkRequest(TeaModel): +class ListOperationsRequest(TeaModel): def __init__( self, - component_id: str = None, - component_ids: str = None, - 
labels_shrink: str = None, - name: str = None, + object_id: str = None, + object_type: str = None, + operation_id: str = None, + operation_type: str = None, order: str = None, page_number: int = None, page_size: int = None, - provider: str = None, sort_by: str = None, - workspace_id: str = None, + status: str = None, ): - self.component_id = component_id - self.component_ids = component_ids - self.labels_shrink = labels_shrink - self.name = name + self.object_id = object_id + self.object_type = object_type + self.operation_id = operation_id + self.operation_type = operation_type self.order = order self.page_number = page_number self.page_size = page_size - self.provider = provider self.sort_by = sort_by - self.workspace_id = workspace_id + self.status = status def validate(self): pass @@ -12139,68 +16869,63 @@ def to_map(self): return _map result = dict() - if self.component_id is not None: - result['ComponentId'] = self.component_id - if self.component_ids is not None: - result['ComponentIds'] = self.component_ids - if self.labels_shrink is not None: - result['Labels'] = self.labels_shrink - if self.name is not None: - result['Name'] = self.name + if self.object_id is not None: + result['ObjectId'] = self.object_id + if self.object_type is not None: + result['ObjectType'] = self.object_type + if self.operation_id is not None: + result['OperationId'] = self.operation_id + if self.operation_type is not None: + result['OperationType'] = self.operation_type if self.order is not None: result['Order'] = self.order if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size - if self.provider is not None: - result['Provider'] = self.provider if self.sort_by is not None: result['SortBy'] = self.sort_by - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.status is not None: + result['Status'] = self.status return result def from_map(self, m: dict = None): m 
= m or dict() - if m.get('ComponentId') is not None: - self.component_id = m.get('ComponentId') - if m.get('ComponentIds') is not None: - self.component_ids = m.get('ComponentIds') - if m.get('Labels') is not None: - self.labels_shrink = m.get('Labels') - if m.get('Name') is not None: - self.name = m.get('Name') + if m.get('ObjectId') is not None: + self.object_id = m.get('ObjectId') + if m.get('ObjectType') is not None: + self.object_type = m.get('ObjectType') + if m.get('OperationId') is not None: + self.operation_id = m.get('OperationId') + if m.get('OperationType') is not None: + self.operation_type = m.get('OperationType') if m.get('Order') is not None: self.order = m.get('Order') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: self.page_size = m.get('PageSize') - if m.get('Provider') is not None: - self.provider = m.get('Provider') if m.get('SortBy') is not None: self.sort_by = m.get('SortBy') - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('Status') is not None: + self.status = m.get('Status') return self -class ListComponentsResponseBodyComponentsVersions(TeaModel): +class ListOperationsResponseBody(TeaModel): def __init__( self, - gmt_create_time: str = None, - snapshot_id: str = None, - status: str = None, - version: str = None, + operations: List[ResourceOperation] = None, + request_id: str = None, ): - self.gmt_create_time = gmt_create_time - self.snapshot_id = snapshot_id - self.status = status - self.version = version + self.operations = operations + self.request_id = request_id def validate(self): - pass + if self.operations: + for k in self.operations: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -12208,67 +16933,40 @@ def to_map(self): return _map result = dict() - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.snapshot_id is not None: - result['SnapshotId'] = 
self.snapshot_id - if self.status is not None: - result['Status'] = self.status - if self.version is not None: - result['Version'] = self.version + result['Operations'] = [] + if self.operations is not None: + for k in self.operations: + result['Operations'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): - m = m or dict() - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('SnapshotId') is not None: - self.snapshot_id = m.get('SnapshotId') - if m.get('Status') is not None: - self.status = m.get('Status') - if m.get('Version') is not None: - self.version = m.get('Version') + m = m or dict() + self.operations = [] + if m.get('Operations') is not None: + for k in m.get('Operations'): + temp_model = ResourceOperation() + self.operations.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') return self -class ListComponentsResponseBodyComponents(TeaModel): +class ListOperationsResponse(TeaModel): def __init__( self, - component_id: str = None, - description: str = None, - display_name: str = None, - gmt_create_time: str = None, - gmt_modified_time: str = None, - labels: List[Label] = None, - name: str = None, - provider: str = None, - tenant_id: str = None, - user_id: str = None, - versions: List[ListComponentsResponseBodyComponentsVersions] = None, - workspace_id: str = None, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListOperationsResponseBody = None, ): - self.component_id = component_id - self.description = description - self.display_name = display_name - self.gmt_create_time = gmt_create_time - self.gmt_modified_time = gmt_modified_time - self.labels = labels - self.name = name - self.provider = provider - self.tenant_id = tenant_id - self.user_id = user_id - self.versions = versions - self.workspace_id = workspace_id + 
self.headers = headers + self.status_code = status_code + self.body = body def validate(self): - if self.labels: - for k in self.labels: - if k: - k.validate() - if self.versions: - for k in self.versions: - if k: - k.validate() + if self.body: + self.body.validate() def to_map(self): _map = super().to_map() @@ -12276,85 +16974,42 @@ def to_map(self): return _map result = dict() - if self.component_id is not None: - result['ComponentId'] = self.component_id - if self.description is not None: - result['Description'] = self.description - if self.display_name is not None: - result['DisplayName'] = self.display_name - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.gmt_modified_time is not None: - result['GmtModifiedTime'] = self.gmt_modified_time - result['Labels'] = [] - if self.labels is not None: - for k in self.labels: - result['Labels'].append(k.to_map() if k else None) - if self.name is not None: - result['Name'] = self.name - if self.provider is not None: - result['Provider'] = self.provider - if self.tenant_id is not None: - result['TenantId'] = self.tenant_id - if self.user_id is not None: - result['UserId'] = self.user_id - result['Versions'] = [] - if self.versions is not None: - for k in self.versions: - result['Versions'].append(k.to_map() if k else None) - if self.workspace_id is not None: - result['WorkspaceId'] = self.workspace_id + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ComponentId') is not None: - self.component_id = m.get('ComponentId') - if m.get('Description') is not None: - self.description = m.get('Description') - if m.get('DisplayName') is not None: - self.display_name = m.get('DisplayName') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = 
m.get('GmtCreateTime') - if m.get('GmtModifiedTime') is not None: - self.gmt_modified_time = m.get('GmtModifiedTime') - self.labels = [] - if m.get('Labels') is not None: - for k in m.get('Labels'): - temp_model = Label() - self.labels.append(temp_model.from_map(k)) - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Provider') is not None: - self.provider = m.get('Provider') - if m.get('TenantId') is not None: - self.tenant_id = m.get('TenantId') - if m.get('UserId') is not None: - self.user_id = m.get('UserId') - self.versions = [] - if m.get('Versions') is not None: - for k in m.get('Versions'): - temp_model = ListComponentsResponseBodyComponentsVersions() - self.versions.append(temp_model.from_map(k)) - if m.get('WorkspaceId') is not None: - self.workspace_id = m.get('WorkspaceId') + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListOperationsResponseBody() + self.body = temp_model.from_map(m['body']) return self -class ListComponentsResponseBody(TeaModel): +class ListPermissionsResponseBody(TeaModel): def __init__( self, - components: List[ListComponentsResponseBodyComponents] = None, + features: Features = None, + permissions: List[Permission] = None, request_id: str = None, - total_count: int = None, ): - self.components = components + self.features = features + self.permissions = permissions self.request_id = request_id - self.total_count = total_count def validate(self): - if self.components: - for k in self.components: + if self.features: + self.features.validate() + if self.permissions: + for k in self.permissions: if k: k.validate() @@ -12364,45 +17019,43 @@ def to_map(self): return _map result = dict() - result['Components'] = [] - if self.components is not None: - for k in self.components: - result['Components'].append(k.to_map() if k else None) + if self.features is not None: + 
result['Features'] = self.features.to_map() + result['Permissions'] = [] + if self.permissions is not None: + for k in self.permissions: + result['Permissions'].append(k.to_map() if k else None) if self.request_id is not None: - result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count + result['requestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - self.components = [] - if m.get('Components') is not None: - for k in m.get('Components'): - temp_model = ListComponentsResponseBodyComponents() - self.components.append(temp_model.from_map(k)) - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + if m.get('Features') is not None: + temp_model = Features() + self.features = temp_model.from_map(m['Features']) + self.permissions = [] + if m.get('Permissions') is not None: + for k in m.get('Permissions'): + temp_model = Permission() + self.permissions.append(temp_model.from_map(k)) + if m.get('requestId') is not None: + self.request_id = m.get('requestId') return self -class ListComponentsResponse(TeaModel): +class ListPermissionsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListComponentsResponseBody = None, + body: ListPermissionsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12427,27 +17080,31 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListComponentsResponseBody() + temp_model = ListPermissionsResponseBody() self.body = 
temp_model.from_map(m['body']) return self -class ListInstanceJobsRequest(TeaModel): +class ListQuotaUsersRequest(TeaModel): def __init__( self, - instance_job_type: str = None, order: str = None, page_number: int = None, page_size: int = None, + self_only: bool = None, sort_by: str = None, - status: str = None, + user_id: str = None, + username: str = None, + workspace_id: str = None, ): - self.instance_job_type = instance_job_type self.order = order self.page_number = page_number self.page_size = page_size + self.self_only = self_only self.sort_by = sort_by - self.status = status + self.user_id = user_id + self.username = username + self.workspace_id = workspace_id def validate(self): pass @@ -12458,126 +17115,61 @@ def to_map(self): return _map result = dict() - if self.instance_job_type is not None: - result['InstanceJobType'] = self.instance_job_type if self.order is not None: result['Order'] = self.order if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size + if self.self_only is not None: + result['SelfOnly'] = self.self_only if self.sort_by is not None: result['SortBy'] = self.sort_by - if self.status is not None: - result['Status'] = self.status - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('InstanceJobType') is not None: - self.instance_job_type = m.get('InstanceJobType') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('Status') is not None: - self.status = m.get('Status') - return self - - -class ListInstanceJobsResponseBodyInstanceJobs(TeaModel): - def __init__( - self, - creator: str = None, - gmt_create_time: str = None, - instance_id: str = None, - instance_job_id: str = None, - 
instance_job_type: str = None, - reason_code: str = None, - reason_message: str = None, - status: str = None, - workspace_id: str = None, - ): - self.creator = creator - self.gmt_create_time = gmt_create_time - self.instance_id = instance_id - self.instance_job_id = instance_job_id - self.instance_job_type = instance_job_type - self.reason_code = reason_code - self.reason_message = reason_message - self.status = status - self.workspace_id = workspace_id - - def validate(self): - pass - - def to_map(self): - _map = super().to_map() - if _map is not None: - return _map - - result = dict() - if self.creator is not None: - result['Creator'] = self.creator - if self.gmt_create_time is not None: - result['GmtCreateTime'] = self.gmt_create_time - if self.instance_id is not None: - result['InstanceId'] = self.instance_id - if self.instance_job_id is not None: - result['InstanceJobId'] = self.instance_job_id - if self.instance_job_type is not None: - result['InstanceJobType'] = self.instance_job_type - if self.reason_code is not None: - result['ReasonCode'] = self.reason_code - if self.reason_message is not None: - result['ReasonMessage'] = self.reason_message - if self.status is not None: - result['Status'] = self.status + if self.user_id is not None: + result['UserId'] = self.user_id + if self.username is not None: + result['Username'] = self.username if self.workspace_id is not None: result['WorkspaceId'] = self.workspace_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Creator') is not None: - self.creator = m.get('Creator') - if m.get('GmtCreateTime') is not None: - self.gmt_create_time = m.get('GmtCreateTime') - if m.get('InstanceId') is not None: - self.instance_id = m.get('InstanceId') - if m.get('InstanceJobId') is not None: - self.instance_job_id = m.get('InstanceJobId') - if m.get('InstanceJobType') is not None: - self.instance_job_type = m.get('InstanceJobType') - if m.get('ReasonCode') is not None: - self.reason_code = 
m.get('ReasonCode') - if m.get('ReasonMessage') is not None: - self.reason_message = m.get('ReasonMessage') - if m.get('Status') is not None: - self.status = m.get('Status') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('SelfOnly') is not None: + self.self_only = m.get('SelfOnly') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('UserId') is not None: + self.user_id = m.get('UserId') + if m.get('Username') is not None: + self.username = m.get('Username') if m.get('WorkspaceId') is not None: self.workspace_id = m.get('WorkspaceId') return self -class ListInstanceJobsResponseBody(TeaModel): +class ListQuotaUsersResponseBody(TeaModel): def __init__( self, - instance_jobs: ListInstanceJobsResponseBodyInstanceJobs = None, + quota_users: List[QuotaUser] = None, request_id: str = None, - total_count: int = None, + total_count: str = None, ): - self.instance_jobs = instance_jobs + self.quota_users = quota_users self.request_id = request_id self.total_count = total_count def validate(self): - if self.instance_jobs: - self.instance_jobs.validate() + if self.quota_users: + for k in self.quota_users: + if k: + k.validate() def to_map(self): _map = super().to_map() @@ -12585,8 +17177,10 @@ def to_map(self): return _map result = dict() - if self.instance_jobs is not None: - result['InstanceJobs'] = self.instance_jobs.to_map() + result['QuotaUsers'] = [] + if self.quota_users is not None: + for k in self.quota_users: + result['QuotaUsers'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id if self.total_count is not None: @@ -12595,9 +17189,11 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - if m.get('InstanceJobs') is not None: - temp_model = ListInstanceJobsResponseBodyInstanceJobs() - 
self.instance_jobs = temp_model.from_map(m['InstanceJobs']) + self.quota_users = [] + if m.get('QuotaUsers') is not None: + for k in m.get('QuotaUsers'): + temp_model = QuotaUser() + self.quota_users.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') if m.get('TotalCount') is not None: @@ -12605,21 +17201,18 @@ def from_map(self, m: dict = None): return self -class ListInstanceJobsResponse(TeaModel): +class ListQuotaUsersResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListInstanceJobsResponseBody = None, + body: ListQuotaUsersResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12644,28 +17237,47 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListInstanceJobsResponseBody() + temp_model = ListQuotaUsersResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListNodeTypesRequest(TeaModel): +class ListQuotaWorkloadsRequest(TeaModel): def __init__( self, - accelerator_type: str = None, - gputype: str = None, - node_types: str = None, - quota_id: str = None, - resource_group_ids: str = None, + before_workload_id: str = None, + node_name: str = None, + order: str = None, + page_number: int = None, + page_size: int = None, + show_own: bool = None, + sort_by: str = None, + status: str = None, + sub_quota_ids: str = None, + user_ids: str = None, + workload_created_time_range: TimeRangeFilter = None, + workload_ids: str = None, + workload_type: str = None, + workspace_ids: str = None, ): - self.accelerator_type = accelerator_type - self.gputype = gputype - self.node_types = node_types - 
self.quota_id = quota_id - self.resource_group_ids = resource_group_ids + self.before_workload_id = before_workload_id + self.node_name = node_name + self.order = order + self.page_number = page_number + self.page_size = page_size + self.show_own = show_own + self.sort_by = sort_by + self.status = status + self.sub_quota_ids = sub_quota_ids + self.user_ids = user_ids + self.workload_created_time_range = workload_created_time_range + self.workload_ids = workload_ids + self.workload_type = workload_type + self.workspace_ids = workspace_ids def validate(self): - pass + if self.workload_created_time_range: + self.workload_created_time_range.validate() def to_map(self): _map = super().to_map() @@ -12673,51 +17285,84 @@ def to_map(self): return _map result = dict() - if self.accelerator_type is not None: - result['AcceleratorType'] = self.accelerator_type - if self.gputype is not None: - result['GPUType'] = self.gputype - if self.node_types is not None: - result['NodeTypes'] = self.node_types - if self.quota_id is not None: - result['QuotaId'] = self.quota_id - if self.resource_group_ids is not None: - result['ResourceGroupIds'] = self.resource_group_ids + if self.before_workload_id is not None: + result['BeforeWorkloadId'] = self.before_workload_id + if self.node_name is not None: + result['NodeName'] = self.node_name + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.show_own is not None: + result['ShowOwn'] = self.show_own + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.status is not None: + result['Status'] = self.status + if self.sub_quota_ids is not None: + result['SubQuotaIds'] = self.sub_quota_ids + if self.user_ids is not None: + result['UserIds'] = self.user_ids + if self.workload_created_time_range is not None: + result['WorkloadCreatedTimeRange'] = 
self.workload_created_time_range.to_map() + if self.workload_ids is not None: + result['WorkloadIds'] = self.workload_ids + if self.workload_type is not None: + result['WorkloadType'] = self.workload_type + if self.workspace_ids is not None: + result['WorkspaceIds'] = self.workspace_ids return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AcceleratorType') is not None: - self.accelerator_type = m.get('AcceleratorType') - if m.get('GPUType') is not None: - self.gputype = m.get('GPUType') - if m.get('NodeTypes') is not None: - self.node_types = m.get('NodeTypes') - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') - if m.get('ResourceGroupIds') is not None: - self.resource_group_ids = m.get('ResourceGroupIds') + if m.get('BeforeWorkloadId') is not None: + self.before_workload_id = m.get('BeforeWorkloadId') + if m.get('NodeName') is not None: + self.node_name = m.get('NodeName') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('ShowOwn') is not None: + self.show_own = m.get('ShowOwn') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Status') is not None: + self.status = m.get('Status') + if m.get('SubQuotaIds') is not None: + self.sub_quota_ids = m.get('SubQuotaIds') + if m.get('UserIds') is not None: + self.user_ids = m.get('UserIds') + if m.get('WorkloadCreatedTimeRange') is not None: + temp_model = TimeRangeFilter() + self.workload_created_time_range = temp_model.from_map(m['WorkloadCreatedTimeRange']) + if m.get('WorkloadIds') is not None: + self.workload_ids = m.get('WorkloadIds') + if m.get('WorkloadType') is not None: + self.workload_type = m.get('WorkloadType') + if m.get('WorkspaceIds') is not None: + self.workspace_ids = m.get('WorkspaceIds') return self -class ListNodeTypesResponseBody(TeaModel): +class 
ListQuotaWorkloadsResponseBody(TeaModel): def __init__( self, - node_types: List[NodeType] = None, request_id: str = None, - statistics: List[NodeTypeStatistic] = None, + total_count: int = None, + workloads: List[QueueInfo] = None, ): - self.node_types = node_types self.request_id = request_id - self.statistics = statistics + self.total_count = total_count + self.workloads = workloads def validate(self): - if self.node_types: - for k in self.node_types: - if k: - k.validate() - if self.statistics: - for k in self.statistics: + if self.workloads: + for k in self.workloads: if k: k.validate() @@ -12727,50 +17372,42 @@ def to_map(self): return _map result = dict() - result['NodeTypes'] = [] - if self.node_types is not None: - for k in self.node_types: - result['NodeTypes'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id - result['Statistics'] = [] - if self.statistics is not None: - for k in self.statistics: - result['Statistics'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count + result['Workloads'] = [] + if self.workloads is not None: + for k in self.workloads: + result['Workloads'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - self.node_types = [] - if m.get('NodeTypes') is not None: - for k in m.get('NodeTypes'): - temp_model = NodeType() - self.node_types.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - self.statistics = [] - if m.get('Statistics') is not None: - for k in m.get('Statistics'): - temp_model = NodeTypeStatistic() - self.statistics.append(temp_model.from_map(k)) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + self.workloads = [] + if m.get('Workloads') is not None: + for k in m.get('Workloads'): + temp_model = QueueInfo() + self.workloads.append(temp_model.from_map(k)) return self -class 
ListNodeTypesResponse(TeaModel): +class ListQuotaWorkloadsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListNodeTypesResponseBody = None, + body: ListQuotaWorkloadsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12795,37 +17432,41 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListNodeTypesResponseBody() + temp_model = ListQuotaWorkloadsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListNodesRequest(TeaModel): +class ListQuotasRequest(TeaModel): def __init__( self, - accelerator_type: str = None, - gputype: str = None, - node_statuses: str = None, - node_types: str = None, + labels: str = None, + layout_mode: str = None, order: str = None, - order_statuses: str = None, page_number: int = None, page_size: int = None, - quota_id: str = None, - resource_group_ids: str = None, + parent_quota_id: str = None, + quota_ids: str = None, + quota_name: str = None, + resource_type: str = None, sort_by: str = None, + statuses: str = None, + verbose: bool = None, + workspace_ids: str = None, ): - self.accelerator_type = accelerator_type - self.gputype = gputype - self.node_statuses = node_statuses - self.node_types = node_types + self.labels = labels + self.layout_mode = layout_mode self.order = order - self.order_statuses = order_statuses self.page_number = page_number self.page_size = page_size - self.quota_id = quota_id - self.resource_group_ids = resource_group_ids + self.parent_quota_id = parent_quota_id + self.quota_ids = quota_ids + self.quota_name = quota_name + self.resource_type = resource_type self.sort_by = sort_by + 
self.statuses = statuses + self.verbose = verbose + self.workspace_ids = workspace_ids def validate(self): pass @@ -12836,69 +17477,79 @@ def to_map(self): return _map result = dict() - if self.accelerator_type is not None: - result['AcceleratorType'] = self.accelerator_type - if self.gputype is not None: - result['GPUType'] = self.gputype - if self.node_statuses is not None: - result['NodeStatuses'] = self.node_statuses - if self.node_types is not None: - result['NodeTypes'] = self.node_types + if self.labels is not None: + result['Labels'] = self.labels + if self.layout_mode is not None: + result['LayoutMode'] = self.layout_mode if self.order is not None: result['Order'] = self.order - if self.order_statuses is not None: - result['OrderStatuses'] = self.order_statuses if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size - if self.quota_id is not None: - result['QuotaId'] = self.quota_id - if self.resource_group_ids is not None: - result['ResourceGroupIds'] = self.resource_group_ids + if self.parent_quota_id is not None: + result['ParentQuotaId'] = self.parent_quota_id + if self.quota_ids is not None: + result['QuotaIds'] = self.quota_ids + if self.quota_name is not None: + result['QuotaName'] = self.quota_name + if self.resource_type is not None: + result['ResourceType'] = self.resource_type if self.sort_by is not None: result['SortBy'] = self.sort_by + if self.statuses is not None: + result['Statuses'] = self.statuses + if self.verbose is not None: + result['Verbose'] = self.verbose + if self.workspace_ids is not None: + result['WorkspaceIds'] = self.workspace_ids return result def from_map(self, m: dict = None): m = m or dict() - if m.get('AcceleratorType') is not None: - self.accelerator_type = m.get('AcceleratorType') - if m.get('GPUType') is not None: - self.gputype = m.get('GPUType') - if m.get('NodeStatuses') is not None: - self.node_statuses = m.get('NodeStatuses') - if 
m.get('NodeTypes') is not None: - self.node_types = m.get('NodeTypes') + if m.get('Labels') is not None: + self.labels = m.get('Labels') + if m.get('LayoutMode') is not None: + self.layout_mode = m.get('LayoutMode') if m.get('Order') is not None: self.order = m.get('Order') - if m.get('OrderStatuses') is not None: - self.order_statuses = m.get('OrderStatuses') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: self.page_size = m.get('PageSize') - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') - if m.get('ResourceGroupIds') is not None: - self.resource_group_ids = m.get('ResourceGroupIds') + if m.get('ParentQuotaId') is not None: + self.parent_quota_id = m.get('ParentQuotaId') + if m.get('QuotaIds') is not None: + self.quota_ids = m.get('QuotaIds') + if m.get('QuotaName') is not None: + self.quota_name = m.get('QuotaName') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') if m.get('SortBy') is not None: self.sort_by = m.get('SortBy') + if m.get('Statuses') is not None: + self.statuses = m.get('Statuses') + if m.get('Verbose') is not None: + self.verbose = m.get('Verbose') + if m.get('WorkspaceIds') is not None: + self.workspace_ids = m.get('WorkspaceIds') return self -class ListNodesResponseBody(TeaModel): +class ListQuotasResponseBody(TeaModel): def __init__( self, - nodes: List[Node] = None, + quotas: List[Quota] = None, request_id: str = None, + total_count: int = None, ): - self.nodes = nodes + self.quotas = quotas self.request_id = request_id + self.total_count = total_count def validate(self): - if self.nodes: - for k in self.nodes: + if self.quotas: + for k in self.quotas: if k: k.validate() @@ -12908,41 +17559,42 @@ def to_map(self): return _map result = dict() - result['Nodes'] = [] - if self.nodes is not None: - for k in self.nodes: - result['Nodes'].append(k.to_map() if k else None) + result['Quotas'] = [] + if self.quotas is not None: + for k 
in self.quotas: + result['Quotas'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - self.nodes = [] - if m.get('Nodes') is not None: - for k in m.get('Nodes'): - temp_model = Node() - self.nodes.append(temp_model.from_map(k)) + self.quotas = [] + if m.get('Quotas') is not None: + for k in m.get('Quotas'): + temp_model = Quota() + self.quotas.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class ListNodesResponse(TeaModel): +class ListQuotasResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListNodesResponseBody = None, + body: ListQuotasResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -12967,31 +17619,37 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListNodesResponseBody() + temp_model = ListQuotasResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListOperationsRequest(TeaModel): +class ListResourceGroupMachineGroupsRequest(TeaModel): def __init__( self, - object_id: str = None, - object_type: str = None, - operation_id: str = None, - operation_type: str = None, + creator_id: str = None, + ecs_spec: str = None, + name: str = None, order: str = None, + order_instance_id: str = None, page_number: int = None, page_size: int = None, + payment_duration: str = None, + payment_duration_unit: 
str = None, + payment_type: str = None, sort_by: str = None, status: str = None, ): - self.object_id = object_id - self.object_type = object_type - self.operation_id = operation_id - self.operation_type = operation_type + self.creator_id = creator_id + self.ecs_spec = ecs_spec + self.name = name self.order = order + self.order_instance_id = order_instance_id self.page_number = page_number self.page_size = page_size + self.payment_duration = payment_duration + self.payment_duration_unit = payment_duration_unit + self.payment_type = payment_type self.sort_by = sort_by self.status = status @@ -13004,20 +17662,26 @@ def to_map(self): return _map result = dict() - if self.object_id is not None: - result['ObjectId'] = self.object_id - if self.object_type is not None: - result['ObjectType'] = self.object_type - if self.operation_id is not None: - result['OperationId'] = self.operation_id - if self.operation_type is not None: - result['OperationType'] = self.operation_type + if self.creator_id is not None: + result['CreatorID'] = self.creator_id + if self.ecs_spec is not None: + result['EcsSpec'] = self.ecs_spec + if self.name is not None: + result['Name'] = self.name if self.order is not None: result['Order'] = self.order + if self.order_instance_id is not None: + result['OrderInstanceId'] = self.order_instance_id if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: result['PageSize'] = self.page_size + if self.payment_duration is not None: + result['PaymentDuration'] = self.payment_duration + if self.payment_duration_unit is not None: + result['PaymentDurationUnit'] = self.payment_duration_unit + if self.payment_type is not None: + result['PaymentType'] = self.payment_type if self.sort_by is not None: result['SortBy'] = self.sort_by if self.status is not None: @@ -13026,20 +17690,26 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() - if m.get('ObjectId') is not None: - self.object_id = 
m.get('ObjectId') - if m.get('ObjectType') is not None: - self.object_type = m.get('ObjectType') - if m.get('OperationId') is not None: - self.operation_id = m.get('OperationId') - if m.get('OperationType') is not None: - self.operation_type = m.get('OperationType') + if m.get('CreatorID') is not None: + self.creator_id = m.get('CreatorID') + if m.get('EcsSpec') is not None: + self.ecs_spec = m.get('EcsSpec') + if m.get('Name') is not None: + self.name = m.get('Name') if m.get('Order') is not None: self.order = m.get('Order') + if m.get('OrderInstanceId') is not None: + self.order_instance_id = m.get('OrderInstanceId') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: self.page_size = m.get('PageSize') + if m.get('PaymentDuration') is not None: + self.payment_duration = m.get('PaymentDuration') + if m.get('PaymentDurationUnit') is not None: + self.payment_duration_unit = m.get('PaymentDurationUnit') + if m.get('PaymentType') is not None: + self.payment_type = m.get('PaymentType') if m.get('SortBy') is not None: self.sort_by = m.get('SortBy') if m.get('Status') is not None: @@ -13047,18 +17717,20 @@ def from_map(self, m: dict = None): return self -class ListOperationsResponseBody(TeaModel): +class ListResourceGroupMachineGroupsResponseBody(TeaModel): def __init__( self, - operations: List[ResourceOperation] = None, + machine_groups: List[MachineGroup] = None, request_id: str = None, + total_count: str = None, ): - self.operations = operations + self.machine_groups = machine_groups self.request_id = request_id + self.total_count = total_count def validate(self): - if self.operations: - for k in self.operations: + if self.machine_groups: + for k in self.machine_groups: if k: k.validate() @@ -13068,41 +17740,42 @@ def to_map(self): return _map result = dict() - result['Operations'] = [] - if self.operations is not None: - for k in self.operations: - result['Operations'].append(k.to_map() if k else None) + 
result['MachineGroups'] = [] + if self.machine_groups is not None: + for k in self.machine_groups: + result['MachineGroups'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + if self.total_count is not None: + result['TotalCount'] = self.total_count return result def from_map(self, m: dict = None): m = m or dict() - self.operations = [] - if m.get('Operations') is not None: - for k in m.get('Operations'): - temp_model = ResourceOperation() - self.operations.append(temp_model.from_map(k)) + self.machine_groups = [] + if m.get('MachineGroups') is not None: + for k in m.get('MachineGroups'): + temp_model = MachineGroup() + self.machine_groups.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class ListOperationsResponse(TeaModel): +class ListResourceGroupMachineGroupsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListOperationsResponseBody = None, + body: ListResourceGroupMachineGroupsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -13127,27 +17800,101 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListOperationsResponseBody() + temp_model = ListResourceGroupMachineGroupsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListPermissionsResponseBody(TeaModel): +class ListResourceGroupsRequest(TeaModel): + def __init__( + self, + computing_resource_provider: str = None, + name: str = None, + order: str = None, + page_number: int 
= None, + page_size: int = None, + resource_type: str = None, + show_all: bool = None, + sort_by: str = None, + status: str = None, + ): + self.computing_resource_provider = computing_resource_provider + self.name = name + self.order = order + self.page_number = page_number + self.page_size = page_size + self.resource_type = resource_type + self.show_all = show_all + self.sort_by = sort_by + self.status = status + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.computing_resource_provider is not None: + result['ComputingResourceProvider'] = self.computing_resource_provider + if self.name is not None: + result['Name'] = self.name + if self.order is not None: + result['Order'] = self.order + if self.page_number is not None: + result['PageNumber'] = self.page_number + if self.page_size is not None: + result['PageSize'] = self.page_size + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + if self.show_all is not None: + result['ShowAll'] = self.show_all + if self.sort_by is not None: + result['SortBy'] = self.sort_by + if self.status is not None: + result['Status'] = self.status + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ComputingResourceProvider') is not None: + self.computing_resource_provider = m.get('ComputingResourceProvider') + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Order') is not None: + self.order = m.get('Order') + if m.get('PageNumber') is not None: + self.page_number = m.get('PageNumber') + if m.get('PageSize') is not None: + self.page_size = m.get('PageSize') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + if m.get('ShowAll') is not None: + self.show_all = m.get('ShowAll') + if m.get('SortBy') is not None: + self.sort_by = m.get('SortBy') + if m.get('Status') is not None: + self.status = m.get('Status') + return self + + 
+class ListResourceGroupsResponseBody(TeaModel): def __init__( self, - features: Features = None, - permissions: List[Permission] = None, request_id: str = None, + resource_groups: List[ResourceGroup] = None, + total_count: int = None, ): - self.features = features - self.permissions = permissions self.request_id = request_id + self.resource_groups = resource_groups + # This parameter is required. + self.total_count = total_count def validate(self): - if self.features: - self.features.validate() - if self.permissions: - for k in self.permissions: + if self.resource_groups: + for k in self.resource_groups: if k: k.validate() @@ -13157,46 +17904,42 @@ def to_map(self): return _map result = dict() - if self.features is not None: - result['Features'] = self.features.to_map() - result['Permissions'] = [] - if self.permissions is not None: - for k in self.permissions: - result['Permissions'].append(k.to_map() if k else None) if self.request_id is not None: - result['requestId'] = self.request_id - return result - - def from_map(self, m: dict = None): - m = m or dict() - if m.get('Features') is not None: - temp_model = Features() - self.features = temp_model.from_map(m['Features']) - self.permissions = [] - if m.get('Permissions') is not None: - for k in m.get('Permissions'): - temp_model = Permission() - self.permissions.append(temp_model.from_map(k)) - if m.get('requestId') is not None: - self.request_id = m.get('requestId') + result['RequestId'] = self.request_id + result['ResourceGroups'] = [] + if self.resource_groups is not None: + for k in self.resource_groups: + result['ResourceGroups'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + self.resource_groups = [] + if m.get('ResourceGroups') is not None: + for k in m.get('ResourceGroups'): + temp_model = 
ResourceGroup() + self.resource_groups.append(temp_model.from_map(k)) + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') return self -class ListPermissionsResponse(TeaModel): +class ListResourceGroupsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListPermissionsResponseBody = None, + body: ListResourceGroupsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -13221,35 +17964,17 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListPermissionsResponseBody() + temp_model = ListResourceGroupsResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListQuotasRequest(TeaModel): +class ListSpotsStockPreviewRequest(TeaModel): def __init__( self, - order: str = None, - page_number: int = None, - page_size: int = None, - parent_quota_id: str = None, - quota_ids: str = None, - quota_name: str = None, - resource_type: str = None, - sort_by: str = None, - statuses: str = None, - workspace_ids: str = None, + instance_types: str = None, ): - self.order = order - self.page_number = page_number - self.page_size = page_size - self.parent_quota_id = parent_quota_id - self.quota_ids = quota_ids - self.quota_name = quota_name - self.resource_type = resource_type - self.sort_by = sort_by - self.statuses = statuses - self.workspace_ids = workspace_ids + self.instance_types = instance_types def validate(self): pass @@ -13260,65 +17985,29 @@ def to_map(self): return _map result = dict() - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if 
self.page_size is not None: - result['PageSize'] = self.page_size - if self.parent_quota_id is not None: - result['ParentQuotaId'] = self.parent_quota_id - if self.quota_ids is not None: - result['QuotaIds'] = self.quota_ids - if self.quota_name is not None: - result['QuotaName'] = self.quota_name - if self.resource_type is not None: - result['ResourceType'] = self.resource_type - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.statuses is not None: - result['Statuses'] = self.statuses - if self.workspace_ids is not None: - result['WorkspaceIds'] = self.workspace_ids + if self.instance_types is not None: + result['InstanceTypes'] = self.instance_types return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('ParentQuotaId') is not None: - self.parent_quota_id = m.get('ParentQuotaId') - if m.get('QuotaIds') is not None: - self.quota_ids = m.get('QuotaIds') - if m.get('QuotaName') is not None: - self.quota_name = m.get('QuotaName') - if m.get('ResourceType') is not None: - self.resource_type = m.get('ResourceType') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('Statuses') is not None: - self.statuses = m.get('Statuses') - if m.get('WorkspaceIds') is not None: - self.workspace_ids = m.get('WorkspaceIds') + if m.get('InstanceTypes') is not None: + self.instance_types = m.get('InstanceTypes') return self -class ListQuotasResponseBody(TeaModel): +class ListSpotsStockPreviewResponseBody(TeaModel): def __init__( self, - quotas: List[Quota] = None, request_id: str = None, + spots_stock_preview: List[SpotStockPreview] = None, ): - self.quotas = quotas self.request_id = request_id + self.spots_stock_preview = spots_stock_preview def validate(self): - if self.quotas: - for k in self.quotas: 
+ if self.spots_stock_preview: + for k in self.spots_stock_preview: if k: k.validate() @@ -13328,41 +18017,38 @@ def to_map(self): return _map result = dict() - result['Quotas'] = [] - if self.quotas is not None: - for k in self.quotas: - result['Quotas'].append(k.to_map() if k else None) if self.request_id is not None: result['RequestId'] = self.request_id + result['SpotsStockPreview'] = [] + if self.spots_stock_preview is not None: + for k in self.spots_stock_preview: + result['SpotsStockPreview'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - self.quotas = [] - if m.get('Quotas') is not None: - for k in m.get('Quotas'): - temp_model = Quota() - self.quotas.append(temp_model.from_map(k)) if m.get('RequestId') is not None: self.request_id = m.get('RequestId') + self.spots_stock_preview = [] + if m.get('SpotsStockPreview') is not None: + for k in m.get('SpotsStockPreview'): + temp_model = SpotStockPreview() + self.spots_stock_preview.append(temp_model.from_map(k)) return self -class ListQuotasResponse(TeaModel): +class ListSpotsStockPreviewResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListQuotasResponseBody = None, + body: ListSpotsStockPreviewResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -13387,37 +18073,19 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListQuotasResponseBody() + temp_model = ListSpotsStockPreviewResponseBody() self.body = temp_model.from_map(m['body']) return self -class ListResourceGroupMachineGroupsRequest(TeaModel): +class ListTagResourcesRequestTag(TeaModel): def 
__init__( self, - creator_id: str = None, - ecs_spec: str = None, - name: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, - payment_duration: str = None, - payment_duration_unit: str = None, - payment_type: str = None, - sort_by: str = None, - status: str = None, + key: str = None, + value: str = None, ): - self.creator_id = creator_id - self.ecs_spec = ecs_spec - self.name = name - self.order = order - self.page_number = page_number - self.page_size = page_size - self.payment_duration = payment_duration - self.payment_duration_unit = payment_duration_unit - self.payment_type = payment_type - self.sort_by = sort_by - self.status = status + self.key = key + self.value = value def validate(self): pass @@ -13428,71 +18096,41 @@ def to_map(self): return _map result = dict() - if self.creator_id is not None: - result['CreatorID'] = self.creator_id - if self.ecs_spec is not None: - result['EcsSpec'] = self.ecs_spec - if self.name is not None: - result['Name'] = self.name - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size - if self.payment_duration is not None: - result['PaymentDuration'] = self.payment_duration - if self.payment_duration_unit is not None: - result['PaymentDurationUnit'] = self.payment_duration_unit - if self.payment_type is not None: - result['PaymentType'] = self.payment_type - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.status is not None: - result['Status'] = self.status + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('CreatorID') is not None: - self.creator_id = m.get('CreatorID') - if m.get('EcsSpec') is not None: - self.ecs_spec = m.get('EcsSpec') - if m.get('Name') is not None: - 
self.name = m.get('Name') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') - if m.get('PaymentDuration') is not None: - self.payment_duration = m.get('PaymentDuration') - if m.get('PaymentDurationUnit') is not None: - self.payment_duration_unit = m.get('PaymentDurationUnit') - if m.get('PaymentType') is not None: - self.payment_type = m.get('PaymentType') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('Status') is not None: - self.status = m.get('Status') + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class ListResourceGroupMachineGroupsResponseBody(TeaModel): +class ListTagResourcesRequest(TeaModel): def __init__( self, - machine_groups: List[MachineGroup] = None, - request_id: str = None, - total_count: str = None, + next_token: str = None, + region_id: str = None, + resource_id: List[str] = None, + resource_type: str = None, + tag: List[ListTagResourcesRequestTag] = None, ): - self.machine_groups = machine_groups - self.request_id = request_id - self.total_count = total_count + self.next_token = next_token + # This parameter is required. + self.region_id = region_id + self.resource_id = resource_id + # This parameter is required. 
+ self.resource_type = resource_type + self.tag = tag def validate(self): - if self.machine_groups: - for k in self.machine_groups: + if self.tag: + for k in self.tag: if k: k.validate() @@ -13502,47 +18140,57 @@ def to_map(self): return _map result = dict() - result['MachineGroups'] = [] - if self.machine_groups is not None: - for k in self.machine_groups: - result['MachineGroups'].append(k.to_map() if k else None) - if self.request_id is not None: - result['RequestId'] = self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count + if self.next_token is not None: + result['NextToken'] = self.next_token + if self.region_id is not None: + result['RegionId'] = self.region_id + if self.resource_id is not None: + result['ResourceId'] = self.resource_id + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + result['Tag'] = [] + if self.tag is not None: + for k in self.tag: + result['Tag'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - self.machine_groups = [] - if m.get('MachineGroups') is not None: - for k in m.get('MachineGroups'): - temp_model = MachineGroup() - self.machine_groups.append(temp_model.from_map(k)) - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + if m.get('NextToken') is not None: + self.next_token = m.get('NextToken') + if m.get('RegionId') is not None: + self.region_id = m.get('RegionId') + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + self.tag = [] + if m.get('Tag') is not None: + for k in m.get('Tag'): + temp_model = ListTagResourcesRequestTag() + self.tag.append(temp_model.from_map(k)) return self -class ListResourceGroupMachineGroupsResponse(TeaModel): +class ListTagResourcesShrinkRequest(TeaModel): def 
__init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: ListResourceGroupMachineGroupsResponseBody = None, + next_token: str = None, + region_id: str = None, + resource_id_shrink: str = None, + resource_type: str = None, + tag_shrink: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.next_token = next_token + # This parameter is required. + self.region_id = region_id + self.resource_id_shrink = resource_id_shrink + # This parameter is required. + self.resource_type = resource_type + self.tag_shrink = tag_shrink def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -13550,48 +18198,45 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.next_token is not None: + result['NextToken'] = self.next_token + if self.region_id is not None: + result['RegionId'] = self.region_id + if self.resource_id_shrink is not None: + result['ResourceId'] = self.resource_id_shrink + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + if self.tag_shrink is not None: + result['Tag'] = self.tag_shrink return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = ListResourceGroupMachineGroupsResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('NextToken') is not None: + self.next_token = m.get('NextToken') + if m.get('RegionId') is not None: + 
self.region_id = m.get('RegionId') + if m.get('ResourceId') is not None: + self.resource_id_shrink = m.get('ResourceId') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + if m.get('Tag') is not None: + self.tag_shrink = m.get('Tag') return self -class ListResourceGroupsRequest(TeaModel): +class ListTagResourcesResponseBodyTagResources(TeaModel): def __init__( - self, - computing_resource_provider: str = None, - name: str = None, - order: str = None, - page_number: int = None, - page_size: int = None, + self, + resource_id: str = None, resource_type: str = None, - show_all: bool = None, - sort_by: str = None, - status: str = None, + tag_key: str = None, + tag_value: str = None, ): - self.computing_resource_provider = computing_resource_provider - self.name = name - self.order = order - self.page_number = page_number - self.page_size = page_size + self.resource_id = resource_id self.resource_type = resource_type - self.show_all = show_all - self.sort_by = sort_by - self.status = status + self.tag_key = tag_key + self.tag_value = tag_value def validate(self): pass @@ -13602,63 +18247,43 @@ def to_map(self): return _map result = dict() - if self.computing_resource_provider is not None: - result['ComputingResourceProvider'] = self.computing_resource_provider - if self.name is not None: - result['Name'] = self.name - if self.order is not None: - result['Order'] = self.order - if self.page_number is not None: - result['PageNumber'] = self.page_number - if self.page_size is not None: - result['PageSize'] = self.page_size + if self.resource_id is not None: + result['ResourceId'] = self.resource_id if self.resource_type is not None: result['ResourceType'] = self.resource_type - if self.show_all is not None: - result['ShowAll'] = self.show_all - if self.sort_by is not None: - result['SortBy'] = self.sort_by - if self.status is not None: - result['Status'] = self.status + if self.tag_key is not None: + result['TagKey'] = self.tag_key + if 
self.tag_value is not None: + result['TagValue'] = self.tag_value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('ComputingResourceProvider') is not None: - self.computing_resource_provider = m.get('ComputingResourceProvider') - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Order') is not None: - self.order = m.get('Order') - if m.get('PageNumber') is not None: - self.page_number = m.get('PageNumber') - if m.get('PageSize') is not None: - self.page_size = m.get('PageSize') + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') if m.get('ResourceType') is not None: self.resource_type = m.get('ResourceType') - if m.get('ShowAll') is not None: - self.show_all = m.get('ShowAll') - if m.get('SortBy') is not None: - self.sort_by = m.get('SortBy') - if m.get('Status') is not None: - self.status = m.get('Status') + if m.get('TagKey') is not None: + self.tag_key = m.get('TagKey') + if m.get('TagValue') is not None: + self.tag_value = m.get('TagValue') return self -class ListResourceGroupsResponseBody(TeaModel): +class ListTagResourcesResponseBody(TeaModel): def __init__( self, + next_token: str = None, request_id: str = None, - resource_groups: List[ResourceGroup] = None, - total_count: int = None, + tag_resources: List[ListTagResourcesResponseBodyTagResources] = None, ): + self.next_token = next_token self.request_id = request_id - self.resource_groups = resource_groups - self.total_count = total_count + self.tag_resources = tag_resources def validate(self): - if self.resource_groups: - for k in self.resource_groups: + if self.tag_resources: + for k in self.tag_resources: if k: k.validate() @@ -13668,45 +18293,42 @@ def to_map(self): return _map result = dict() + if self.next_token is not None: + result['NextToken'] = self.next_token if self.request_id is not None: result['RequestId'] = self.request_id - result['ResourceGroups'] = [] - if self.resource_groups is not None: - for k in 
self.resource_groups: - result['ResourceGroups'].append(k.to_map() if k else None) - if self.total_count is not None: - result['TotalCount'] = self.total_count + result['TagResources'] = [] + if self.tag_resources is not None: + for k in self.tag_resources: + result['TagResources'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() + if m.get('NextToken') is not None: + self.next_token = m.get('NextToken') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - self.resource_groups = [] - if m.get('ResourceGroups') is not None: - for k in m.get('ResourceGroups'): - temp_model = ResourceGroup() - self.resource_groups.append(temp_model.from_map(k)) - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') + self.tag_resources = [] + if m.get('TagResources') is not None: + for k in m.get('TagResources'): + temp_model = ListTagResourcesResponseBodyTagResources() + self.tag_resources.append(temp_model.from_map(k)) return self -class ListResourceGroupsResponse(TeaModel): +class ListTagResourcesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListResourceGroupsResponseBody = None, + body: ListTagResourcesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -13731,7 +18353,7 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListResourceGroupsResponseBody() + temp_model = ListTagResourcesResponseBody() self.body = temp_model.from_map(m['body']) return self @@ -13745,9 +18367,11 @@ def __init__( start_time: str = None, token: str = None, ): + # Use the UTC time format: 
yyyy-MM-ddTHH:mmZ self.end_time = end_time self.page_number = page_number self.page_size = page_size + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.start_time = start_time self.token = token @@ -13838,9 +18462,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -13879,9 +18500,11 @@ def __init__( start_time: str = None, token: str = None, ): + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.end_time = end_time self.page_number = page_number self.page_size = page_size + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.start_time = start_time self.token = token @@ -13972,9 +18595,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -14014,9 +18634,12 @@ def __init__( time_step: str = None, token: str = None, ): + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.end_time = end_time self.instance_id = instance_id + # This parameter is required. 
self.metric_type = metric_type + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.start_time = start_time self.time_step = time_step self.token = token @@ -14194,9 +18817,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -14230,15 +18850,19 @@ class ListTrainingJobLogsRequest(TeaModel): def __init__( self, end_time: str = None, + instance_id: str = None, page_number: int = None, page_size: int = None, start_time: str = None, token: str = None, worker_id: str = None, ): + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.end_time = end_time + self.instance_id = instance_id self.page_number = page_number self.page_size = page_size + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.start_time = start_time self.token = token self.worker_id = worker_id @@ -14254,6 +18878,8 @@ def to_map(self): result = dict() if self.end_time is not None: result['EndTime'] = self.end_time + if self.instance_id is not None: + result['InstanceId'] = self.instance_id if self.page_number is not None: result['PageNumber'] = self.page_number if self.page_size is not None: @@ -14270,6 +18896,8 @@ def from_map(self, m: dict = None): m = m or dict() if m.get('EndTime') is not None: self.end_time = m.get('EndTime') + if m.get('InstanceId') is not None: + self.instance_id = m.get('InstanceId') if m.get('PageNumber') is not None: self.page_number = m.get('PageNumber') if m.get('PageSize') is not None: @@ -14334,9 +18962,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -14377,11 +19002,13 @@ def __init__( start_time: str = None, token: str = None, ): + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.end_time = 
end_time self.name = name self.order = order self.page_number = page_number self.page_size = page_size + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.start_time = start_time self.token = token @@ -14437,6 +19064,7 @@ def __init__( value: float = None, ): self.name = name + # Use the UTC time format: yyyy-MM-ddTHH:mmZ self.timestamp = timestamp self.value = value @@ -14449,37 +19077,260 @@ def to_map(self): return _map result = dict() - if self.name is not None: - result['Name'] = self.name - if self.timestamp is not None: - result['Timestamp'] = self.timestamp - if self.value is not None: - result['Value'] = self.value + if self.name is not None: + result['Name'] = self.name + if self.timestamp is not None: + result['Timestamp'] = self.timestamp + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Timestamp') is not None: + self.timestamp = m.get('Timestamp') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class ListTrainingJobMetricsResponseBody(TeaModel): + def __init__( + self, + metrics: List[ListTrainingJobMetricsResponseBodyMetrics] = None, + request_id: str = None, + ): + self.metrics = metrics + self.request_id = request_id + + def validate(self): + if self.metrics: + for k in self.metrics: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + result['Metrics'] = [] + if self.metrics is not None: + for k in self.metrics: + result['Metrics'].append(k.to_map() if k else None) + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + self.metrics = [] + if m.get('Metrics') is not None: + for k in m.get('Metrics'): + temp_model = ListTrainingJobMetricsResponseBodyMetrics() + 
self.metrics.append(temp_model.from_map(k)) + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class ListTrainingJobMetricsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListTrainingJobMetricsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListTrainingJobMetricsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ListTrainingJobOutputModelsRequest(TeaModel): + def __init__( + self, + token: str = None, + ): + self.token = token + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.token is not None: + result['Token'] = self.token + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Token') is not None: + self.token = m.get('Token') + return self + + +class ListTrainingJobOutputModelsResponseBodyOutputModelsLabels(TeaModel): + def __init__( + self, + name: str = None, + value: str = None, + ): + self.name = name + self.value = value + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.name is not None: + result['Name'] = 
self.name + if self.value is not None: + result['Value'] = self.value + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Name') is not None: + self.name = m.get('Name') + if m.get('Value') is not None: + self.value = m.get('Value') + return self + + +class ListTrainingJobOutputModelsResponseBodyOutputModels(TeaModel): + def __init__( + self, + evaluation_spec: Dict[str, Any] = None, + inference_spec: Dict[str, Any] = None, + labels: List[ListTrainingJobOutputModelsResponseBodyOutputModelsLabels] = None, + metrics: Dict[str, Any] = None, + output_channel_name: str = None, + source_id: str = None, + source_type: str = None, + training_spec: Dict[str, Any] = None, + uri: str = None, + ): + self.evaluation_spec = evaluation_spec + self.inference_spec = inference_spec + self.labels = labels + self.metrics = metrics + self.output_channel_name = output_channel_name + self.source_id = source_id + self.source_type = source_type + self.training_spec = training_spec + self.uri = uri + + def validate(self): + if self.labels: + for k in self.labels: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.evaluation_spec is not None: + result['EvaluationSpec'] = self.evaluation_spec + if self.inference_spec is not None: + result['InferenceSpec'] = self.inference_spec + result['Labels'] = [] + if self.labels is not None: + for k in self.labels: + result['Labels'].append(k.to_map() if k else None) + if self.metrics is not None: + result['Metrics'] = self.metrics + if self.output_channel_name is not None: + result['OutputChannelName'] = self.output_channel_name + if self.source_id is not None: + result['SourceId'] = self.source_id + if self.source_type is not None: + result['SourceType'] = self.source_type + if self.training_spec is not None: + result['TrainingSpec'] = self.training_spec + if self.uri is not None: + result['Uri'] = self.uri return result def 
from_map(self, m: dict = None): m = m or dict() - if m.get('Name') is not None: - self.name = m.get('Name') - if m.get('Timestamp') is not None: - self.timestamp = m.get('Timestamp') - if m.get('Value') is not None: - self.value = m.get('Value') + if m.get('EvaluationSpec') is not None: + self.evaluation_spec = m.get('EvaluationSpec') + if m.get('InferenceSpec') is not None: + self.inference_spec = m.get('InferenceSpec') + self.labels = [] + if m.get('Labels') is not None: + for k in m.get('Labels'): + temp_model = ListTrainingJobOutputModelsResponseBodyOutputModelsLabels() + self.labels.append(temp_model.from_map(k)) + if m.get('Metrics') is not None: + self.metrics = m.get('Metrics') + if m.get('OutputChannelName') is not None: + self.output_channel_name = m.get('OutputChannelName') + if m.get('SourceId') is not None: + self.source_id = m.get('SourceId') + if m.get('SourceType') is not None: + self.source_type = m.get('SourceType') + if m.get('TrainingSpec') is not None: + self.training_spec = m.get('TrainingSpec') + if m.get('Uri') is not None: + self.uri = m.get('Uri') return self -class ListTrainingJobMetricsResponseBody(TeaModel): +class ListTrainingJobOutputModelsResponseBody(TeaModel): def __init__( self, - metrics: List[ListTrainingJobMetricsResponseBodyMetrics] = None, - request_id: str = None, + output_models: List[ListTrainingJobOutputModelsResponseBodyOutputModels] = None, ): - self.metrics = metrics - self.request_id = request_id + self.output_models = output_models def validate(self): - if self.metrics: - for k in self.metrics: + if self.output_models: + for k in self.output_models: if k: k.validate() @@ -14489,41 +19340,34 @@ def to_map(self): return _map result = dict() - result['Metrics'] = [] - if self.metrics is not None: - for k in self.metrics: - result['Metrics'].append(k.to_map() if k else None) - if self.request_id is not None: - result['RequestId'] = self.request_id + result['OutputModels'] = [] + if self.output_models is not None: + for k 
in self.output_models: + result['OutputModels'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() - self.metrics = [] - if m.get('Metrics') is not None: - for k in m.get('Metrics'): - temp_model = ListTrainingJobMetricsResponseBodyMetrics() - self.metrics.append(temp_model.from_map(k)) - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + self.output_models = [] + if m.get('OutputModels') is not None: + for k in m.get('OutputModels'): + temp_model = ListTrainingJobOutputModelsResponseBodyOutputModels() + self.output_models.append(temp_model.from_map(k)) return self -class ListTrainingJobMetricsResponse(TeaModel): +class ListTrainingJobOutputModelsResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListTrainingJobMetricsResponseBody = None, + body: ListTrainingJobOutputModelsResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -14548,7 +19392,7 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListTrainingJobMetricsResponseBody() + temp_model = ListTrainingJobOutputModelsResponseBody() self.body = temp_model.from_map(m['body']) return self @@ -14763,17 +19607,75 @@ def from_map(self, m: dict = None): return self +class ListTrainingJobsResponseBodyTrainingJobsComputeResourceInstanceSpec(TeaModel): + def __init__( + self, + cpu: str = None, + gpu: str = None, + gputype: str = None, + memory: str = None, + shared_memory: str = None, + ): + self.cpu = cpu + self.gpu = gpu + self.gputype = gputype + self.memory = memory + self.shared_memory = shared_memory + + def validate(self): + pass + + 
def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.cpu is not None: + result['CPU'] = self.cpu + if self.gpu is not None: + result['GPU'] = self.gpu + if self.gputype is not None: + result['GPUType'] = self.gputype + if self.memory is not None: + result['Memory'] = self.memory + if self.shared_memory is not None: + result['SharedMemory'] = self.shared_memory + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('CPU') is not None: + self.cpu = m.get('CPU') + if m.get('GPU') is not None: + self.gpu = m.get('GPU') + if m.get('GPUType') is not None: + self.gputype = m.get('GPUType') + if m.get('Memory') is not None: + self.memory = m.get('Memory') + if m.get('SharedMemory') is not None: + self.shared_memory = m.get('SharedMemory') + return self + + class ListTrainingJobsResponseBodyTrainingJobsComputeResource(TeaModel): def __init__( self, ecs_count: int = None, ecs_spec: str = None, + instance_count: int = None, + instance_spec: ListTrainingJobsResponseBodyTrainingJobsComputeResourceInstanceSpec = None, + resource_id: str = None, ): self.ecs_count = ecs_count self.ecs_spec = ecs_spec + self.instance_count = instance_count + self.instance_spec = instance_spec + self.resource_id = resource_id def validate(self): - pass + if self.instance_spec: + self.instance_spec.validate() def to_map(self): _map = super().to_map() @@ -14785,6 +19687,12 @@ def to_map(self): result['EcsCount'] = self.ecs_count if self.ecs_spec is not None: result['EcsSpec'] = self.ecs_spec + if self.instance_count is not None: + result['InstanceCount'] = self.instance_count + if self.instance_spec is not None: + result['InstanceSpec'] = self.instance_spec.to_map() + if self.resource_id is not None: + result['ResourceId'] = self.resource_id return result def from_map(self, m: dict = None): @@ -14793,6 +19701,46 @@ def from_map(self, m: dict = None): self.ecs_count = m.get('EcsCount') if m.get('EcsSpec') is not 
None: self.ecs_spec = m.get('EcsSpec') + if m.get('InstanceCount') is not None: + self.instance_count = m.get('InstanceCount') + if m.get('InstanceSpec') is not None: + temp_model = ListTrainingJobsResponseBodyTrainingJobsComputeResourceInstanceSpec() + self.instance_spec = temp_model.from_map(m['InstanceSpec']) + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + return self + + +class ListTrainingJobsResponseBodyTrainingJobsExperimentConfig(TeaModel): + def __init__( + self, + experiment_id: str = None, + experiment_name: str = None, + ): + self.experiment_id = experiment_id + self.experiment_name = experiment_name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.experiment_id is not None: + result['ExperimentId'] = self.experiment_id + if self.experiment_name is not None: + result['ExperimentName'] = self.experiment_name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('ExperimentId') is not None: + self.experiment_id = m.get('ExperimentId') + if m.get('ExperimentName') is not None: + self.experiment_name = m.get('ExperimentName') return self @@ -15021,11 +19969,13 @@ def from_map(self, m: dict = None): class ListTrainingJobsResponseBodyTrainingJobsUserVpc(TeaModel): def __init__( self, + default_route: str = None, extended_cidrs: List[str] = None, security_group_id: str = None, switch_id: str = None, vpc_id: str = None, ): + self.default_route = default_route self.extended_cidrs = extended_cidrs self.security_group_id = security_group_id self.switch_id = switch_id @@ -15040,6 +19990,8 @@ def to_map(self): return _map result = dict() + if self.default_route is not None: + result['DefaultRoute'] = self.default_route if self.extended_cidrs is not None: result['ExtendedCIDRs'] = self.extended_cidrs if self.security_group_id is not None: @@ -15052,6 +20004,8 @@ def to_map(self): def from_map(self, m: dict = 
None): m = m or dict() + if m.get('DefaultRoute') is not None: + self.default_route = m.get('DefaultRoute') if m.get('ExtendedCIDRs') is not None: self.extended_cidrs = m.get('ExtendedCIDRs') if m.get('SecurityGroupId') is not None: @@ -15070,6 +20024,8 @@ def __init__( algorithm_provider: str = None, algorithm_version: str = None, compute_resource: ListTrainingJobsResponseBodyTrainingJobsComputeResource = None, + environments: Dict[str, str] = None, + experiment_config: ListTrainingJobsResponseBodyTrainingJobsExperimentConfig = None, gmt_create_time: str = None, gmt_modified_time: str = None, hyper_parameters: List[ListTrainingJobsResponseBodyTrainingJobsHyperParameters] = None, @@ -15077,6 +20033,7 @@ def __init__( is_temp_algo: bool = None, labels: List[ListTrainingJobsResponseBodyTrainingJobsLabels] = None, output_channels: List[ListTrainingJobsResponseBodyTrainingJobsOutputChannels] = None, + python_requirements: List[str] = None, reason_code: str = None, reason_message: str = None, role_arn: str = None, @@ -15094,6 +20051,8 @@ def __init__( self.algorithm_provider = algorithm_provider self.algorithm_version = algorithm_version self.compute_resource = compute_resource + self.environments = environments + self.experiment_config = experiment_config self.gmt_create_time = gmt_create_time self.gmt_modified_time = gmt_modified_time self.hyper_parameters = hyper_parameters @@ -15101,6 +20060,7 @@ def __init__( self.is_temp_algo = is_temp_algo self.labels = labels self.output_channels = output_channels + self.python_requirements = python_requirements self.reason_code = reason_code self.reason_message = reason_message self.role_arn = role_arn @@ -15117,6 +20077,8 @@ def __init__( def validate(self): if self.compute_resource: self.compute_resource.validate() + if self.experiment_config: + self.experiment_config.validate() if self.hyper_parameters: for k in self.hyper_parameters: if k: @@ -15156,6 +20118,10 @@ def to_map(self): result['AlgorithmVersion'] = 
self.algorithm_version if self.compute_resource is not None: result['ComputeResource'] = self.compute_resource.to_map() + if self.environments is not None: + result['Environments'] = self.environments + if self.experiment_config is not None: + result['ExperimentConfig'] = self.experiment_config.to_map() if self.gmt_create_time is not None: result['GmtCreateTime'] = self.gmt_create_time if self.gmt_modified_time is not None: @@ -15178,6 +20144,8 @@ def to_map(self): if self.output_channels is not None: for k in self.output_channels: result['OutputChannels'].append(k.to_map() if k else None) + if self.python_requirements is not None: + result['PythonRequirements'] = self.python_requirements if self.reason_code is not None: result['ReasonCode'] = self.reason_code if self.reason_message is not None: @@ -15217,6 +20185,11 @@ def from_map(self, m: dict = None): if m.get('ComputeResource') is not None: temp_model = ListTrainingJobsResponseBodyTrainingJobsComputeResource() self.compute_resource = temp_model.from_map(m['ComputeResource']) + if m.get('Environments') is not None: + self.environments = m.get('Environments') + if m.get('ExperimentConfig') is not None: + temp_model = ListTrainingJobsResponseBodyTrainingJobsExperimentConfig() + self.experiment_config = temp_model.from_map(m['ExperimentConfig']) if m.get('GmtCreateTime') is not None: self.gmt_create_time = m.get('GmtCreateTime') if m.get('GmtModifiedTime') is not None: @@ -15243,6 +20216,8 @@ def from_map(self, m: dict = None): for k in m.get('OutputChannels'): temp_model = ListTrainingJobsResponseBodyTrainingJobsOutputChannels() self.output_channels.append(temp_model.from_map(k)) + if m.get('PythonRequirements') is not None: + self.python_requirements = m.get('PythonRequirements') if m.get('ReasonCode') is not None: self.reason_code = m.get('ReasonCode') if m.get('ReasonMessage') is not None: @@ -15300,43 +20275,149 @@ def to_map(self): result = dict() if self.request_id is not None: result['RequestId'] = 
self.request_id - if self.total_count is not None: - result['TotalCount'] = self.total_count - result['TrainingJobs'] = [] - if self.training_jobs is not None: - for k in self.training_jobs: - result['TrainingJobs'].append(k.to_map() if k else None) + if self.total_count is not None: + result['TotalCount'] = self.total_count + result['TrainingJobs'] = [] + if self.training_jobs is not None: + for k in self.training_jobs: + result['TrainingJobs'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + if m.get('TotalCount') is not None: + self.total_count = m.get('TotalCount') + self.training_jobs = [] + if m.get('TrainingJobs') is not None: + for k in m.get('TrainingJobs'): + temp_model = ListTrainingJobsResponseBodyTrainingJobs() + self.training_jobs.append(temp_model.from_map(k)) + return self + + +class ListTrainingJobsResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ListTrainingJobsResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ListTrainingJobsResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class OperateNodeRequest(TeaModel): + def __init__( + self, + 
operation: str = None, + resource_group_id: str = None, + ): + # This parameter is required. + self.operation = operation + # This parameter is required. + self.resource_group_id = resource_group_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.operation is not None: + result['Operation'] = self.operation + if self.resource_group_id is not None: + result['ResourceGroupId'] = self.resource_group_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Operation') is not None: + self.operation = m.get('Operation') + if m.get('ResourceGroupId') is not None: + self.resource_group_id = m.get('ResourceGroupId') + return self + + +class OperateNodeResponseBody(TeaModel): + def __init__( + self, + node_id: str = None, + request_id: str = None, + ): + self.node_id = node_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.node_id is not None: + result['NodeId'] = self.node_id + if self.request_id is not None: + result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() + if m.get('NodeId') is not None: + self.node_id = m.get('NodeId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') - if m.get('TotalCount') is not None: - self.total_count = m.get('TotalCount') - self.training_jobs = [] - if m.get('TrainingJobs') is not None: - for k in m.get('TrainingJobs'): - temp_model = ListTrainingJobsResponseBodyTrainingJobs() - self.training_jobs.append(temp_model.from_map(k)) return self -class ListTrainingJobsResponse(TeaModel): +class OperateNodeResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ListTrainingJobsResponseBody = None, + body: OperateNodeResponseBody = None, ): self.headers = headers 
self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -15361,7 +20442,7 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ListTrainingJobsResponseBody() + temp_model = OperateNodeResponseBody() self.body = temp_model.from_map(m['body']) return self @@ -15372,6 +20453,7 @@ def __init__( target_algorithm_name: str = None, update_if_exists: bool = None, ): + # This parameter is required. self.target_algorithm_name = target_algorithm_name self.update_if_exists = update_if_exists @@ -15444,9 +20526,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -15483,6 +20562,7 @@ def __init__( target_algorithm_version: str = None, update_if_exists: bool = None, ): + # This parameter is required. 
self.target_algorithm_name = target_algorithm_name self.target_algorithm_version = target_algorithm_version self.update_if_exists = update_if_exists @@ -15554,21 +20634,264 @@ def from_map(self, m: dict = None): return self -class ReleaseAlgorithmVersionResponse(TeaModel): +class ReleaseAlgorithmVersionResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ReleaseAlgorithmVersionResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ReleaseAlgorithmVersionResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ReleaseMachineGroupResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['requestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('requestId') is not None: + self.request_id = m.get('requestId') + return self + + +class ReleaseMachineGroupResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ReleaseMachineGroupResponseBody = None, + ): + 
self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ReleaseMachineGroupResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class ScaleQuotaRequest(TeaModel): + def __init__( + self, + min: ResourceSpec = None, + resource_group_ids: List[str] = None, + ): + self.min = min + self.resource_group_ids = resource_group_ids + + def validate(self): + if self.min: + self.min.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.min is not None: + result['Min'] = self.min.to_map() + if self.resource_group_ids is not None: + result['ResourceGroupIds'] = self.resource_group_ids + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Min') is not None: + temp_model = ResourceSpec() + self.min = temp_model.from_map(m['Min']) + if m.get('ResourceGroupIds') is not None: + self.resource_group_ids = m.get('ResourceGroupIds') + return self + + +class ScaleQuotaResponseBody(TeaModel): + def __init__( + self, + quota_id: str = None, + request_id: str = None, + ): + # Quota Id + self.quota_id = quota_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.quota_id is not None: + 
result['QuotaId'] = self.quota_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('QuotaId') is not None: + self.quota_id = m.get('QuotaId') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class ScaleQuotaResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: ScaleQuotaResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = ScaleQuotaResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + +class StopTrainingJobResponseBody(TeaModel): + def __init__( + self, + request_id: str = None, + ): + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class StopTrainingJobResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: ReleaseAlgorithmVersionResponseBody = 
None, + body: StopTrainingJobResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -15593,23 +20916,22 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ReleaseAlgorithmVersionResponseBody() + temp_model = StopTrainingJobResponseBody() self.body = temp_model.from_map(m['body']) return self -class ScaleQuotaRequest(TeaModel): +class TagResourcesRequestTag(TeaModel): def __init__( self, - min: AllocateStrategySpec = None, - resource_group_ids: List[str] = None, + key: str = None, + value: str = None, ): - self.min = min - self.resource_group_ids = resource_group_ids + self.key = key + self.value = value def validate(self): - if self.min: - self.min.validate() + pass def to_map(self): _map = super().to_map() @@ -15617,29 +20939,79 @@ def to_map(self): return _map result = dict() - if self.min is not None: - result['Min'] = self.min.to_map() - if self.resource_group_ids is not None: - result['ResourceGroupIds'] = self.resource_group_ids + if self.key is not None: + result['Key'] = self.key + if self.value is not None: + result['Value'] = self.value return result def from_map(self, m: dict = None): m = m or dict() - if m.get('Min') is not None: - temp_model = AllocateStrategySpec() - self.min = temp_model.from_map(m['Min']) - if m.get('ResourceGroupIds') is not None: - self.resource_group_ids = m.get('ResourceGroupIds') + if m.get('Key') is not None: + self.key = m.get('Key') + if m.get('Value') is not None: + self.value = m.get('Value') return self -class ScaleQuotaResponseBody(TeaModel): +class TagResourcesRequest(TeaModel): + def __init__( + self, + region_id: str = None, + resource_id: List[str] = None, + 
resource_type: str = None, + tag: List[TagResourcesRequestTag] = None, + ): + self.region_id = region_id + self.resource_id = resource_id + self.resource_type = resource_type + self.tag = tag + + def validate(self): + if self.tag: + for k in self.tag: + if k: + k.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.region_id is not None: + result['RegionId'] = self.region_id + if self.resource_id is not None: + result['ResourceId'] = self.resource_id + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + result['Tag'] = [] + if self.tag is not None: + for k in self.tag: + result['Tag'].append(k.to_map() if k else None) + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('RegionId') is not None: + self.region_id = m.get('RegionId') + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + self.tag = [] + if m.get('Tag') is not None: + for k in m.get('Tag'): + temp_model = TagResourcesRequestTag() + self.tag.append(temp_model.from_map(k)) + return self + + +class TagResourcesResponseBody(TeaModel): def __init__( self, - quota_id: str = None, request_id: str = None, ): - self.quota_id = quota_id self.request_id = request_id def validate(self): @@ -15651,36 +21023,29 @@ def to_map(self): return _map result = dict() - if self.quota_id is not None: - result['QuotaId'] = self.quota_id if self.request_id is not None: result['RequestId'] = self.request_id return result def from_map(self, m: dict = None): m = m or dict() - if m.get('QuotaId') is not None: - self.quota_id = m.get('QuotaId') if m.get('RequestId') is not None: self.request_id = m.get('RequestId') return self -class ScaleQuotaResponse(TeaModel): +class TagResourcesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: 
ScaleQuotaResponseBody = None, + body: TagResourcesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -15705,17 +21070,28 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = ScaleQuotaResponseBody() + temp_model = TagResourcesResponseBody() self.body = temp_model.from_map(m['body']) return self -class StopArrearsTrainingJobResponseBody(TeaModel): +class UntagResourcesRequest(TeaModel): def __init__( self, - request_id: str = None, + all: bool = None, + region_id: str = None, + resource_id: List[str] = None, + resource_type: str = None, + tag_key: List[str] = None, ): - self.request_id = request_id + self.all = all + # This parameter is required. + self.region_id = region_id + # This parameter is required. + self.resource_id = resource_id + # This parameter is required. 
+ self.resource_type = resource_type + self.tag_key = tag_key def validate(self): pass @@ -15726,34 +21102,53 @@ def to_map(self): return _map result = dict() - if self.request_id is not None: - result['RequestId'] = self.request_id + if self.all is not None: + result['All'] = self.all + if self.region_id is not None: + result['RegionId'] = self.region_id + if self.resource_id is not None: + result['ResourceId'] = self.resource_id + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + if self.tag_key is not None: + result['TagKey'] = self.tag_key return result def from_map(self, m: dict = None): m = m or dict() - if m.get('RequestId') is not None: - self.request_id = m.get('RequestId') + if m.get('All') is not None: + self.all = m.get('All') + if m.get('RegionId') is not None: + self.region_id = m.get('RegionId') + if m.get('ResourceId') is not None: + self.resource_id = m.get('ResourceId') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + if m.get('TagKey') is not None: + self.tag_key = m.get('TagKey') return self -class StopArrearsTrainingJobResponse(TeaModel): +class UntagResourcesShrinkRequest(TeaModel): def __init__( self, - headers: Dict[str, str] = None, - status_code: int = None, - body: StopArrearsTrainingJobResponseBody = None, + all: bool = None, + region_id: str = None, + resource_id_shrink: str = None, + resource_type: str = None, + tag_key_shrink: str = None, ): - self.headers = headers - self.status_code = status_code - self.body = body + self.all = all + # This parameter is required. + self.region_id = region_id + # This parameter is required. + self.resource_id_shrink = resource_id_shrink + # This parameter is required. 
+ self.resource_type = resource_type + self.tag_key_shrink = tag_key_shrink def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') - if self.body: - self.body.validate() + pass def to_map(self): _map = super().to_map() @@ -15761,27 +21156,34 @@ def to_map(self): return _map result = dict() - if self.headers is not None: - result['headers'] = self.headers - if self.status_code is not None: - result['statusCode'] = self.status_code - if self.body is not None: - result['body'] = self.body.to_map() + if self.all is not None: + result['All'] = self.all + if self.region_id is not None: + result['RegionId'] = self.region_id + if self.resource_id_shrink is not None: + result['ResourceId'] = self.resource_id_shrink + if self.resource_type is not None: + result['ResourceType'] = self.resource_type + if self.tag_key_shrink is not None: + result['TagKey'] = self.tag_key_shrink return result def from_map(self, m: dict = None): m = m or dict() - if m.get('headers') is not None: - self.headers = m.get('headers') - if m.get('statusCode') is not None: - self.status_code = m.get('statusCode') - if m.get('body') is not None: - temp_model = StopArrearsTrainingJobResponseBody() - self.body = temp_model.from_map(m['body']) + if m.get('All') is not None: + self.all = m.get('All') + if m.get('RegionId') is not None: + self.region_id = m.get('RegionId') + if m.get('ResourceId') is not None: + self.resource_id_shrink = m.get('ResourceId') + if m.get('ResourceType') is not None: + self.resource_type = m.get('ResourceType') + if m.get('TagKey') is not None: + self.tag_key_shrink = m.get('TagKey') return self -class StopTrainingJobResponseBody(TeaModel): +class UntagResourcesResponseBody(TeaModel): def __init__( self, request_id: str = None, @@ -15808,21 +21210,18 @@ def from_map(self, m: dict = None): return self -class StopTrainingJobResponse(TeaModel): +class 
UntagResourcesResponse(TeaModel): def __init__( self, headers: Dict[str, str] = None, status_code: int = None, - body: StopTrainingJobResponseBody = None, + body: UntagResourcesResponseBody = None, ): self.headers = headers self.status_code = status_code self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -15847,7 +21246,7 @@ def from_map(self, m: dict = None): if m.get('statusCode') is not None: self.status_code = m.get('statusCode') if m.get('body') is not None: - temp_model = StopTrainingJobResponseBody() + temp_model = UntagResourcesResponseBody() self.body = temp_model.from_map(m['body']) return self @@ -15930,9 +21329,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -16063,9 +21459,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -16181,9 +21574,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -16293,9 +21683,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -16370,9 +21757,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - 
self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -16407,15 +21791,23 @@ def __init__( self, description: str = None, labels: List[Label] = None, + queue_strategy: str = None, + quota_config: QuotaConfig = None, + quota_name: str = None, ): self.description = description self.labels = labels + self.queue_strategy = queue_strategy + self.quota_config = quota_config + self.quota_name = quota_name def validate(self): if self.labels: for k in self.labels: if k: k.validate() + if self.quota_config: + self.quota_config.validate() def to_map(self): _map = super().to_map() @@ -16429,6 +21821,12 @@ def to_map(self): if self.labels is not None: for k in self.labels: result['Labels'].append(k.to_map() if k else None) + if self.queue_strategy is not None: + result['QueueStrategy'] = self.queue_strategy + if self.quota_config is not None: + result['QuotaConfig'] = self.quota_config.to_map() + if self.quota_name is not None: + result['QuotaName'] = self.quota_name return result def from_map(self, m: dict = None): @@ -16440,6 +21838,13 @@ def from_map(self, m: dict = None): for k in m.get('Labels'): temp_model = Label() self.labels.append(temp_model.from_map(k)) + if m.get('QueueStrategy') is not None: + self.queue_strategy = m.get('QueueStrategy') + if m.get('QuotaConfig') is not None: + temp_model = QuotaConfig() + self.quota_config = temp_model.from_map(m['QuotaConfig']) + if m.get('QuotaName') is not None: + self.quota_name = m.get('QuotaName') return self @@ -16449,6 +21854,7 @@ def __init__( quota_id: str = None, request_id: str = None, ): + # Quota Id self.quota_id = quota_id self.request_id = request_id @@ -16488,9 +21894,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -16600,9 +22003,6 @@ 
def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -16635,9 +22035,13 @@ def from_map(self, m: dict = None): class UpdateResourceGroupRequest(TeaModel): def __init__( self, + description: str = None, + name: str = None, unbind: bool = None, user_vpc: UserVpc = None, ): + self.description = description + self.name = name self.unbind = unbind self.user_vpc = user_vpc @@ -16651,6 +22055,10 @@ def to_map(self): return _map result = dict() + if self.description is not None: + result['Description'] = self.description + if self.name is not None: + result['Name'] = self.name if self.unbind is not None: result['Unbind'] = self.unbind if self.user_vpc is not None: @@ -16659,6 +22067,10 @@ def to_map(self): def from_map(self, m: dict = None): m = m or dict() + if m.get('Description') is not None: + self.description = m.get('Description') + if m.get('Name') is not None: + self.name = m.get('Name') if m.get('Unbind') is not None: self.unbind = m.get('Unbind') if m.get('UserVpc') is not None: @@ -16712,9 +22124,6 @@ def __init__( self.body = body def validate(self): - self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() @@ -16744,6 +22153,107 @@ def from_map(self, m: dict = None): return self +class UpdateResourceGroupMachineGroupRequest(TeaModel): + def __init__( + self, + name: str = None, + ): + self.name = name + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.name is not None: + result['Name'] = self.name + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('Name') is not None: + self.name = m.get('Name') + return self + + +class 
UpdateResourceGroupMachineGroupResponseBody(TeaModel): + def __init__( + self, + machine_group_id: str = None, + request_id: str = None, + ): + self.machine_group_id = machine_group_id + self.request_id = request_id + + def validate(self): + pass + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.machine_group_id is not None: + result['MachineGroupID'] = self.machine_group_id + if self.request_id is not None: + result['RequestId'] = self.request_id + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('MachineGroupID') is not None: + self.machine_group_id = m.get('MachineGroupID') + if m.get('RequestId') is not None: + self.request_id = m.get('RequestId') + return self + + +class UpdateResourceGroupMachineGroupResponse(TeaModel): + def __init__( + self, + headers: Dict[str, str] = None, + status_code: int = None, + body: UpdateResourceGroupMachineGroupResponseBody = None, + ): + self.headers = headers + self.status_code = status_code + self.body = body + + def validate(self): + if self.body: + self.body.validate() + + def to_map(self): + _map = super().to_map() + if _map is not None: + return _map + + result = dict() + if self.headers is not None: + result['headers'] = self.headers + if self.status_code is not None: + result['statusCode'] = self.status_code + if self.body is not None: + result['body'] = self.body.to_map() + return result + + def from_map(self, m: dict = None): + m = m or dict() + if m.get('headers') is not None: + self.headers = m.get('headers') + if m.get('statusCode') is not None: + self.status_code = m.get('statusCode') + if m.get('body') is not None: + temp_model = UpdateResourceGroupMachineGroupResponseBody() + self.body = temp_model.from_map(m['body']) + return self + + class UpdateTrainingJobLabelsRequestLabels(TeaModel): def __init__( self, @@ -16851,9 +22361,6 @@ def __init__( self.body = body def validate(self): - 
self.validate_required(self.headers, 'headers') - self.validate_required(self.status_code, 'status_code') - self.validate_required(self.body, 'body') if self.body: self.body.validate() diff --git a/pai/model/__init__.py b/pai/model/__init__.py new file mode 100644 index 0000000..62e551e --- /dev/null +++ b/pai/model/__init__.py @@ -0,0 +1,48 @@ +# Copyright 2024 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from ._model import ( + InferenceSpec, + Model, + ModelFormat, + NfsStorageConfig, + NodeStorageConfig, + OssStorageConfig, + RawStorageConfig, + RegisteredModel, + ResourceConfig, + SharedMemoryConfig, + StorageConfigBase, + container_serving_spec, +) +from ._model_recipe import ModelRecipe, ModelRecipeType, ModelTrainingRecipe + +__all__ = [ + "RegisteredModel", + "Model", + "ModelFormat", + "InferenceSpec", + "ResourceConfig", + "container_serving_spec", + "ModelTrainingRecipe", + "ModelRecipe", + "ModelRecipeType", + "StorageConfigBase", + "NfsStorageConfig", + "NodeStorageConfig", + "SharedMemoryConfig", + "OssStorageConfig", + "RawStorageConfig", +] diff --git a/pai/model.py b/pai/model/_model.py similarity index 74% rename from pai/model.py rename to pai/model/_model.py index b631231..c65ace2 100644 --- a/pai/model.py +++ b/pai/model/_model.py @@ -15,7 +15,6 @@ import copy import distutils.dir_util import json -import logging import os.path import posixpath import shlex @@ -23,33 +22,45 @@ import tempfile import 
textwrap import time +import typing +import warnings +from abc import ABCMeta, abstractmethod from typing import Any, Dict, Iterator, List, Optional, Tuple, Union import requests from addict import Dict as AttrDict from oss2 import ObjectIterator -from .common import git_utils -from .common.consts import INSTANCE_TYPE_LOCAL_GPU, ModelFormat -from .common.docker_utils import ContainerRun, run_container -from .common.oss_utils import OssUriObj, download, is_oss_uri, upload -from .common.utils import ( +from ..common import ProviderAlibabaPAI, git_utils +from ..common.consts import INSTANCE_TYPE_LOCAL_GPU, ModelFormat, StoragePathCategory +from ..common.docker_utils import ContainerRun, run_container +from ..common.logging import get_logger +from ..common.oss_utils import OssUriObj, download, is_oss_uri, upload +from ..common.utils import ( generate_repr, is_local_run_instance_type, random_str, to_plain_text, ) -from .exception import DuplicatedMountException, MountPathIsOccupiedException -from .image import ImageInfo -from .predictor import AsyncPredictor, LocalPredictor, Predictor, ServiceType -from .serializers import SerializerBase -from .session import Session, get_default_session +from ..exception import DuplicatedMountException +from ..image import ImageInfo +from ..job._training_job import InstanceSpec, ModelRecipeSpec, UriInput, UserVpcConfig +from ..predictor import AsyncPredictor, LocalPredictor, Predictor, ServiceType +from ..serializers import SerializerBase +from ..session import Session, get_default_session -logger = logging.getLogger(__name__) +if typing.TYPE_CHECKING: + from ..estimator import AlgorithmEstimator + from ._model_recipe import ModelRecipe, ModelRecipeType, ModelTrainingRecipe + +logger = get_logger(__name__) # Reserved ports for internal use, do not use them for service _RESERVED_PORTS = [8080, 9090] +# Default model upstream source +MODEL_TASK_CREATED_BY_QUICKSTART = "QuickStart" + class DefaultServiceConfig(object): """Default 
configuration used in creating prediction service.""" @@ -64,6 +75,120 @@ class DefaultServiceConfig(object): code_path = "/ml/usercode/" +class StorageConfigBase(metaclass=ABCMeta): + """Base Storage Configuration.""" + + @abstractmethod + def to_dict(self): + pass + + +class RawStorageConfig(StorageConfigBase): + def __init__(self, config: Dict[str, Any]): + self.config = config + + def to_dict(self): + return self.config + + +class OssStorageConfig(StorageConfigBase): + """Configuration for OSS Storage.""" + + def __init__( + self, mount_path: str, oss_path: str, oss_endpoint: Optional[str] = None + ) -> None: + """ + Args: + mount_path (str): The target path where the OSS storage will be mounted. + oss_path (str): The source OSS path, must start with `oss://`. e.g. `oss://bucket-name/path/to/data`. + oss_endpoint (Optional[str]): The endpoint address of the OSS bucket, if not provided, + the internal endpoint for the bucket will be used. + """ + self.mount_path = mount_path + self.oss_path = oss_path + self.oss_endpoint = oss_endpoint + + def to_dict(self) -> Dict[str, Any]: + d = { + "mount_path": self.mount_path, + "oss": {"path": self.oss_path}, + } + + if self.oss_endpoint: + d["oss"]["endpoint"] = self.oss_endpoint + return d + + +class NfsStorageConfig(StorageConfigBase): + """Configuration for NFS Storage.""" + + def __init__( + self, + mount_path: str, + nfs_server: str, + nfs_path: str = "/", + read_only: bool = False, + ) -> None: + """ + Args: + mount_path (str): The target path where the NFS storage will be mounted. + nfs_server (str): The NFS server address. e.g. `xxx.cn-shanghai.nas.aliyuncs.com' + nfs_path (str): The source path in the NFS storage, default to '/'. + read_only (bool): Indicates if the NFS storage should be mounted as read-only, default to False. 
+ """ + self.mount_path = mount_path + self.nfs_path = nfs_path + self.read_only = read_only + self.nfs_server = nfs_server + + def to_dict(self) -> Dict[str, Any]: + return { + "mount_path": self.mount_path, + "nfs": { + "path": self.nfs_path, + "readOnly": self.read_only, + "server": self.nfs_server, + }, + } + + +class NodeStorageConfig(StorageConfigBase): + """Use to mount the local node disk storage to the container.""" + + def __init__(self, mount_path) -> None: + """ + Args: + mount_path (str): The target path where the node disk storage will be mounted. + """ + self.mount_path = mount_path + + def to_dict(self) -> Dict[str, Any]: + return { + "empty_dir": {}, + "mount_path": self.mount_path, + } + + +class SharedMemoryConfig(StorageConfigBase): + """Use to configure the shared memory for the container.""" + + def __init__(self, size_limit: int) -> None: + """ + Args: + size_limit (int): Size limit of the shared memory, in GB. + """ + self.size_limit = size_limit + + def to_dict(self) -> Dict[str, Any]: + return { + "empty_dir": { + "medium": "memory", + "size_limit": self.size_limit, + }, + "mount_path": "/dev/shm", + } + + class ResourceConfig(object): """A class that represents the resource used by a PAI prediction service instance.""" @@ -268,7 +393,9 @@ def _upload_source_dir(cls, source_dir, session): f"Input source code path should be a directory: {source_dir}." ) - target_dir = session.get_storage_path_by_category(category="inference_src") + target_dir = session.get_storage_path_by_category( + category=StoragePathCategory.InferenceSrc + ) # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, @@ -284,6 +411,7 @@ def mount( source: str, mount_path: str, session: Session = None, + properties: Optional[Dict[str, Any]] = None, ) -> Dict[str, Any]: """Mount a source storage to the running container. 
@@ -325,19 +453,20 @@ def mount( ) if "storage" in self._cfg_dict: - configs = self._cfg_dict.get("storage", []) + storages = copy.deepcopy(self._cfg_dict.get("storage", [])) else: - configs = [] + storages = [] + configs = [] uris = set() - for conf in configs: - # check if target mount path is already used. - if conf.get("mount_path") == mount_path: - raise MountPathIsOccupiedException( - f"The mount path '{mount_path}' has already been used." - ) - mount_uri = conf.get("oss", {}).get("path") - uris.add(mount_uri) + for s in storages: + # overwrite the existing mount path + if s.get("mount_path") == mount_path: + continue + oss_uri = s.get("oss", {}).get("path") + if oss_uri: + uris.add(oss_uri) + configs.append(s) if is_oss_uri(source): oss_uri_obj = OssUriObj(source) @@ -348,7 +477,9 @@ def mount( elif os.path.exists(source): # if source is a local path, upload it to OSS bucket and use OSS URI # as storage source. - oss_path = session.get_storage_path_by_category("model_data") + oss_path = session.get_storage_path_by_category( + StoragePathCategory.ModelData + ) oss_uri = upload( source_path=source, oss_path=oss_path, bucket=session.oss_bucket ) @@ -362,6 +493,9 @@ def mount( "Source path is not a valid OSS URI or a existing local path." ) + if properties: + storage_config.update({"properties": properties}) + # check if the source OSS Path is already mounted to the container. if oss_uri_obj.get_dir_uri() in uris: raise DuplicatedMountException( @@ -373,6 +507,68 @@ def mount( self.storage = configs return storage_config + def set_model_data(self, model_data: str, mount_path: Optional[str] = None): + """ + Set the model data for the InferenceSpec instance. + + Args: + model_data (str): The model data to be set. It must be an OSS URI. + mount_path (str, optional): The mount path in the container. + + Raises: + DuplicatedMountException: If the model data is already mounted to the container. 
+ """ + + def is_model_storage(storage: Dict[str, Any]): + return ( + "properties" in storage + and storage["properties"].get("resource_type") == "model" + ) + + if not model_data: + return + if not self.is_container_serving(): + # if model_data is an OSS URI with endpoint, truncate the endpoint. + oss_uri_obj = OssUriObj(model_data) + model_path_uri = "oss://{bucket_name}/{key}".format( + bucket_name=oss_uri_obj.bucket_name, + key=oss_uri_obj.object_key, + ) + self.add_option("model_path", model_path_uri) + else: + indexes = [idx for idx, s in enumerate(self.storage) if is_model_storage(s)] + # replace the first model storage with the model_data. + if indexes: + if len(indexes) > 1: + logger.warning( + "Multiple model storage found in the InferenceSpec," + " use the first one." + ) + idx = indexes[0] + oss_uri_obj = OssUriObj(model_data) + + storage_config = { + "path": oss_uri_obj.get_dir_uri(), + } + + if oss_uri_obj.endpoint: + storage_config.update( + { + "endpoint": oss_uri_obj.endpoint, + } + ) + self.storage[idx].oss = self._transform_value(storage_config) + else: + try: + self.mount( + model_data, + mount_path=mount_path or DefaultServiceConfig.model_path, + properties={"resource_type": "model", "resource_use": "base"}, + ) + except DuplicatedMountException as e: + # ignore duplicated mount + logger.warning("Model is already mounted the container: %s", e) + def container_serving_spec( command: str, @@ -384,6 +580,7 @@ def container_serving_spec( requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, + storage_configs: Optional[List[StorageConfigBase]] = None, session: Optional[Session] = None, ) -> InferenceSpec: """A convenient function to create an InferenceSpec instance that serving the model @@ -458,6 +655,9 @@ def container_serving_spec( health_check (Dict[str, Any], optional): The health check configuration. 
If it not set, A TCP readiness probe will be used to check the health of the HTTP server. + storage_configs (List[StorageConfigBase], optional): A list of storage configs + used to mount the storage to the container. The storage can be OSS, NFS, + SharedMemory, or NodeStorage, etc. session (Session, optional): A PAI session instance used for communicating with PAI service. @@ -514,12 +714,14 @@ def container_serving_spec( "image": image_uri, "port": port, "script": command, - "env": [ - {"name": key, "value": str(value)} - for key, value in environment_variables.items() - ] - if environment_variables - else [], + "env": ( + [ + {"name": key, "value": str(value)} + for key, value in environment_variables.items() + ] + if environment_variables + else [] + ), } if health_check: @@ -536,9 +738,12 @@ def container_serving_spec( container_spec["prepare"] = { "pythonRequirementsPath": requirements_path, } - inference_spec = InferenceSpec(containers=[container_spec]) + if storage_configs: + storage = [s.to_dict() for s in storage_configs] + inference_spec.storage = storage + # mount the uploaded serving scripts to the serving container. 
if source_dir: inference_spec.mount( @@ -682,7 +887,9 @@ def _upload_model_data(self): elif not os.path.exists(self.model_data): raise RuntimeError(f"Model data path does not exist: {self.model_data}") - dest_oss_path = self.session.get_storage_path_by_category(category="model_data") + dest_oss_path = self.session.get_storage_path_by_category( + category=StoragePathCategory.ModelData + ) upload_model_data = upload( source_path=self.model_data, oss_path=dest_oss_path, @@ -760,6 +967,7 @@ def deploy( options=options, wait=wait, serializer=serializer, + **kwargs, ) def _generate_service_name(self): @@ -777,6 +985,8 @@ def _deploy( options: Dict[str, Any] = None, wait: bool = True, serializer: "SerializerBase" = None, + labels: Optional[Dict[str, str]] = None, + **kwargs, ): """Create a prediction service.""" if not service_name: @@ -795,7 +1005,7 @@ def _deploy( resource_id=resource_id, options=options, ) - service_name = self.session.service_api.create(config=config) + service_name = self.session.service_api.create(config=config, labels=labels) self._wait_service_visible(service_name) if service_type == ServiceType.Async: predictor = AsyncPredictor( @@ -816,7 +1026,6 @@ def _deploy( ) if wait: predictor.wait_for_ready() - time.sleep(5) return predictor @@ -865,26 +1074,7 @@ def _build_service_config( inference_spec = InferenceSpec( self._get_inference_spec().to_dict() if self.inference_spec else dict() ) - - if self.model_data: - if not inference_spec.is_container_serving(): - # if model_data is an OSS URI with endpoint, truncate the endpoint. 
- oss_uri_obj = OssUriObj(self.model_data) - model_path_uri = "oss://{bucket_name}/{key}".format( - bucket_name=oss_uri_obj.bucket_name, - key=oss_uri_obj.object_key, - ) - inference_spec.add_option("model_path", model_path_uri) - else: - try: - inference_spec.mount( - self.model_data, - mount_path=DefaultServiceConfig.model_path, - ) - except DuplicatedMountException as e: - # ignore duplicated mount - logger.info("Model is already mounted the container: %s", e) - + inference_spec.set_model_data(model_data=self.model_data) if service_type: inference_spec.add_option("metadata.type", service_type) if inference_spec.is_container_serving(): @@ -987,12 +1177,18 @@ def _deploy_local( # build command to install requirements if requirements_list: - install_requirements = shlex.join( - ["python", "-m", "pip", "install"] + requirements_list + install_requirements = " ".join( + [ + shlex.quote(s) + for s in ["python", "-m", "pip", "install"] + requirements_list + ] ) elif requirements_path: - install_requirements = shlex.join( - ["python", "-m", "pip", "install", "-r", requirements_path] + install_requirements = " ".join( + [ + shlex.quote(s) + for s in ["python", "-m", "pip", "install", "-r", requirements_path] + ] ) else: install_requirements = "" @@ -1054,7 +1250,7 @@ def _wait_local_server_ready( break except requests.ConnectionError: # ConnectionError means server is not ready. - logging.debug("Waiting for the container to be ready...") + logger.debug("Waiting for the container to be ready...") time.sleep(interval) continue @@ -1068,6 +1264,7 @@ def register( format_type: Optional[str] = None, framework_type: Optional[str] = None, training_spec: Optional[Dict[str, Any]] = None, + evaluation_spec: Optional[Dict[str, Any]] = None, approval_status: Optional[str] = None, metrics: Optional[Dict[str, Any]] = None, options: Optional[str] = None, @@ -1104,6 +1301,8 @@ def register( "Xflow", "XGBoost". Default to None. 
             training_spec (dict, optional): The training spec of the model version.
                 Usually, it is got from the training job. Default to None.
+            evaluation_spec (dict, optional): The evaluation spec of the model version.
+                Usually, it is got from the processing job for evaluation. Default to None.
             approval_status (str, optional): The approval status of the model version.
                 The value can be "APPROVED", "PENDING". Default to None.
             metrics (dict, optional): The metrics of the model version.
@@ -1151,6 +1350,7 @@
         else:
             model_id = resp.items[0]["ModelId"]
 
+        # TODO: support registering the model with an evaluation spec
         version_name = self.session.model_api.create_version(
             model_id=model_id,
             uri=self.model_data,
@@ -1160,9 +1360,10 @@
             format_type=format_type,
             framework_type=framework_type,
             training_spec=training_spec,
-            inference_spec=self.inference_spec.to_dict()
-            if self.inference_spec
-            else None,
+            evaluation_spec=evaluation_spec,
+            inference_spec=(
+                self.inference_spec.to_dict() if self.inference_spec else None
+            ),
             approval_status=approval_status,
             metrics=metrics,
             options=options,
@@ -1379,6 +1580,7 @@ def __init__(
         self.model_name = self._model_info.get("ModelName")
         self.model_provider = self._model_info.get("Provider")
         self.task = self._model_info.get("Task")
+        self.domain = self._model_info.get("Domain")
         self.framework_type = self._model_version_info.get("FrameworkType")
         self.source_type = self._model_version_info.get("SourceType")
         self.source_id = self._model_version_info.get("SourceId")
@@ -1386,6 +1588,8 @@
         self.uri = self._model_version_info.get("Uri")
         self.model_version = self._model_version_info.get("VersionName")
         self.training_spec = self._model_version_info.get("TrainingSpec")
+        self.evaluation_spec = self._model_version_info.get("EvaluationSpec")
+        self.compression_spec = self._model_version_info.get("CompressionSpec")
         self.model_labels = {
             lb["Key"]: lb["Value"] for lb in self._model_info.get("Labels", [])
         }
@@ -1656,6 +1860,20 @@
def deploy( if not self.inference_spec: raise RuntimeError("No inference_spec for the registered model.") + labels = kwargs.pop("labels", dict()) + if self.model_provider == ProviderAlibabaPAI: + default_labels = { + "Task": self.task, + "RootModelName": self.model_name, + "RootModelVersion": self.model_version, + "RootModelID": self.model_id, + "Domain": self.domain, + "CreatedBy": MODEL_TASK_CREATED_BY_QUICKSTART, + "BaseModelUri": self.uri, + } + default_labels.update(labels) + labels = default_labels + if is_local_run_instance_type(instance_type): return self._deploy_local( instance_type=instance_type, @@ -1673,6 +1891,8 @@ def deploy( options=options, wait=wait, serializer=serializer, + labels=labels, + **kwargs, ) def _build_service_config( @@ -1745,20 +1965,72 @@ def _build_service_config( return inference_spec.to_dict() + def get_recipe_spec( + self, recipe_type: "ModelRecipeType", method: Optional[str] = None + ) -> ModelRecipeSpec: + from ._model_recipe import ModelRecipeType + + if recipe_type == ModelRecipeType.TRAINING: + raw_spec = self.training_spec + elif recipe_type == ModelRecipeType.EVALUATION: + raw_spec = self.evaluation_spec + elif recipe_type == ModelRecipeType.COMPRESSION: + raw_spec = self.compression_spec + else: + raise ValueError( + f"Invalid recipe_type: {recipe_type}. Supported recipe types are:" + f" {ModelRecipeType.supported_types()}" + ) + + if type(self)._is_multiple_spec(raw_spec): + supported_methods = list(raw_spec.keys()) + if method and method not in supported_methods: + raise ValueError( + "The model recipe does not support the given method:" + f" {method}. Supported methods are: {supported_methods}." + ) + elif method: + spec = raw_spec.get(method) + else: + method = supported_methods[0] + logger.warning( + f"Model recipe contains multiple specs and method is not specified. " + f"Default method is used: '{method}'. Supported training methods are:" + f" {supported_methods}." 
+ ) + spec = raw_spec.get(method) + else: + if method: + raise ValueError( + "The model recipe contains only one spec, do not specify the method." + ) + spec = raw_spec + return ModelRecipeSpec.model_validate(spec) + + def get_training_spec(self, training_method: Optional[str]) -> ModelRecipeSpec: + from ._model_recipe import ModelRecipeType + + return self.get_recipe_spec(ModelRecipeType.TRAINING, training_method) + def get_estimator( self, + training_method: Optional[str] = None, instance_type: Optional[str] = None, instance_count: Optional[int] = None, hyperparameters: Optional[Dict[str, Any]] = None, base_job_name: Optional[str] = None, output_path: Optional[str] = None, max_run_time: Optional[int] = None, - ): + **kwargs, + ) -> "AlgorithmEstimator": """Generate an AlgorithmEstimator. Generate an AlgorithmEstimator object from RegisteredModel's training_spec. Args: + training_method (str, optional): Used to select the training algorithm + that supported by the model. If not specified, the default training + algorithm will be retrieved from the model version. instance_type (str, optional): The machine instance type used to run the training job. If not provider, the default instance type will be retrieved from the algorithm definition. To view the supported machine @@ -1783,36 +2055,26 @@ def get_estimator( Returns: :class:`pai.estimator.AlgorithmEstimator`: An AlgorithmEstimator object. """ - from .estimator import AlgorithmEstimator + from ..estimator import AlgorithmEstimator + + warnings.warn( + "`.get_estimator` is deprecated and will be removed in a future version, you can now use " + "`.training_recipe` instead.", + category=FutureWarning, + ) if not self.training_spec: raise ValueError( "The provided registered model does not contain training spec." ) - ts = self.training_spec - if "AlgorithmSpec" not in ts and "AlgorithmName" not in ts: - raise ValueError( - "The provided registered model's training spec does not contain any" - " algorithms." 
- ) - if "AlgorithmSpec" in ts: - algorithm_spec = ts.get("AlgorithmSpec") - algorithm_name, algorithm_provider, algorithm_version = (None, None, None) - else: - algorithm_name, algorithm_provider, algorithm_version = ( - ts.get("AlgorithmName"), - ts.get("AlgorithmProvider"), - ts.get("AlgorithmVersion"), - ) - algorithm_spec = None - + ts = self.get_training_spec(training_method=training_method) hyperparameters = hyperparameters or {} # TODO: validate the given hyperparameters via algorithm definition - for hp in ts.get("HyperParameters", []): - if hp["Name"] not in hyperparameters: + for hp in ts.hyperparameters: + if hp.name not in hyperparameters: hyperparameters.update( { - hp["Name"]: hp["Value"], + hp.name: hp.value, } ) @@ -1820,53 +2082,287 @@ def get_estimator( base_job_name = f"{self.model_name}_training" if self.model_name else None if not max_run_time: - max_run_time = ts.get("Scheduler", {}).get("MaxRunningTimeInSeconds") + max_run_time = ( + ts.scheduler.max_running_time_in_seconds if ts.scheduler else None + ) - train_compute_resource = ts.get("ComputeResource") - if train_compute_resource and (not instance_type or not instance_count): - # If instance_type or instance_count is not provided, use the default - instance_type = instance_type or train_compute_resource.get("EcsSpec") - instance_count = instance_count or train_compute_resource.get("EcsCount") + resource_id = kwargs.get("resource_id") + instance_spec = kwargs.get("instance_spec") + compute_resource = ts.compute_resource + if resource_id: + if instance_type: + logger.warning( + "The instance type is ignored when resource_id is provided." + ) + instance_spec = instance_type or compute_resource.instance_spec + if not instance_spec: + raise ValueError( + "Instance spec is required when resource_id is provided." 
+ ) + instance_spec = InstanceSpec.model_validate(instance_spec) + instance_count = ( + instance_count + or compute_resource.instance_count + or compute_resource.ecs_count + or 1 + ) + else: + if instance_spec: + logger.warning( + "The instance spec is ignored when resource_id is not provided." + ) + instance_type = instance_type or compute_resource.ecs_spec + instance_count = ( + instance_count + or compute_resource.ecs_count + or compute_resource.instance_count + or 1 + ) + + labels = kwargs.pop("labels", dict()) + if self.model_provider == ProviderAlibabaPAI: + default_labels = { + "BaseModelUri": self.uri, + "CreatedBy": MODEL_TASK_CREATED_BY_QUICKSTART, + "Domain": self.domain, + "RootModelID": self.model_id, + "RootModelName": self.model_name, + "RootModelVersion": self.model_version, + "Task": self.task, + } + default_labels.update(labels) + labels = default_labels return AlgorithmEstimator( - algorithm_name=algorithm_name, - algorithm_version=algorithm_version, - algorithm_provider=algorithm_provider, - algorithm_spec=algorithm_spec, + algorithm_name=ts.algorithm_name, + algorithm_version=ts.algorithm_version, + algorithm_provider=ts.algorithm_provider, + algorithm_spec=ts.algorithm_spec, hyperparameters=hyperparameters, base_job_name=base_job_name, max_run_time=max_run_time, instance_type=instance_type, instance_count=instance_count, + instance_spec=instance_spec, output_path=output_path, + labels=labels, + **kwargs, ) - def get_estimator_inputs(self) -> Dict[str, str]: + def get_estimator_inputs(self, training_method=None) -> Dict[str, Any]: """Get the AlgorithmEstimator's default input channels Get the AlgorithmEstimator's default input channels from RegisteredModel's training_spec. Returns: - dict[str, str]: A dict of input channels. + Dict[str, str]: A dict of input channels. 
""" - if not self.training_spec: + + warnings.warn( + "`.get_estimator_inputs` is deprecated and will be removed in a future version, you can now use " + "`.training_recipe().default_inputs` instead.", + category=FutureWarning, + ) + + default_inputs = ( + self.get_training_spec(training_method=training_method).inputs or [] + ) + + ret = {} + for item in default_inputs: + if isinstance(item, UriInput): + ret[item.name] = item.input_uri + else: + ret[item.name] = item + return ret + + def get_eval_processor( + self, + base_job_name: Optional[str] = None, + output_path: Optional[str] = None, + parameters: Optional[Dict[str, Any]] = None, + max_run_time: Optional[int] = None, + instance_type: Optional[str] = None, + instance_count: Optional[int] = None, + user_vpc_config: Optional[UserVpcConfig] = None, + ): + """Generate a Processor for model evaluation. + + Generate a Processor object from RegisteredModel's evaluation_spec. + + Args: + parameters (dict, optional): A dictionary that represents the + parameters used in the job. Default parameters will + be retrieved from the evaluation spec. + base_job_name (str, optional): The base name used to generate the + job name. If not provided, a default job name will be generated. + output_path (str, optional): An OSS URI to store the outputs of the + jobs. If not provided, an OSS URI will be generated using the default + OSS bucket in the session. When the `estimator.fit` method is called, + a specific OSS URI under the output_path for each channel is generated + and mounted to the container. + max_run_time (int, optional): The maximum time in seconds that the + job can run. The job will be terminated after the time is + reached (Default None). + instance_type (str, optional): The machine instance type used to run the + job. If not provider, the default instance type will be + retrieved from the evaluation spec. 
To view the supported machine + instance types, please refer to the document: + https://help.aliyun.com/document_detail/171758.htm#section-55y-4tq-84y. + instance_count (int, optional): The number of machines used to run the + job. If not provider, the default instance count will be + retrieved from the evaluation spec. + user_vpc_config (:class:`pai.estimator.UserVpcConfig`, optional): The VPC + configuration used to enable the job instance to connect to the + specified user VPC. If provided, an Elastic Network Interface (ENI) will + be created and attached to the job instance, allowing the + instance to access the resources within the specified VPC. Default to + None. + Returns: + :class:`pai.processor.Processor`: An Processor object. + """ + from ..processor import Processor + + warnings.warn( + "`.get_eval_processor` is deprecated and will be removed in a future version, you can now use " + "`.model_recipe` instead.", + category=FutureWarning, + ) + + eval_spec = self._get_evaluation_spec() + if not eval_spec: raise ValueError( - "The provided registered model does not contain training spec." + "The provided registered model does not contain evaluation spec." ) - ts = self.training_spec - if "AlgorithmSpec" not in ts and "AlgorithmName" not in ts: + eval_spec = ModelRecipeSpec.model_validate(eval_spec) + if not eval_spec.algorithm_spec: raise ValueError( - "The provided registered model's training spec does not contain any" - " algorithms." + "Invalid evaluation spec, the evaluation spec does not contain any" + " configuration for the evaluation job." 
) + # workload = eval_spec.get("AlgorithmSpec") - input_channels = {} - if "InputChannels" in ts: - for i in ts["InputChannels"]: - input_channels.update( - { - i["Name"]: i["InputUri"], - } - ) - return input_channels + if not base_job_name: + base_job_name = f"{self.model_name}_eval" if self.model_name else None + + parameters = parameters or dict() + + for item in eval_spec.hyperparameters: + if item.name not in parameters: + parameters[item.name] = item.value + if not max_run_time: + max_run_time = eval_spec.scheduler.max_running_time_in_seconds + + compute_resource = eval_spec.compute_resource + if compute_resource and (not instance_type or not instance_count): + # If instance_type or instance_count is not provided, use the default + instance_type = instance_type or compute_resource.ecs_spec + instance_count = instance_count or compute_resource.ecs_count + + source_dir = None + code_dir = eval_spec.algorithm_spec.code_dir + + if code_dir and code_dir.location_type == "oss": + oss_uri_obj = OssUriObj.from_bucket_key_endpoint( + bucket_name=code_dir.location_value.bucket, + object_key=code_dir.location_value.key, + endpoint=code_dir.location_value.endpoint, + ) + source_dir = oss_uri_obj.uri + processor = Processor( + image_uri=eval_spec.algorithm_spec.image, + command=eval_spec.algorithm_spec.command, + source_dir=source_dir, + parameters=parameters, + max_run_time=max_run_time, + base_job_name=base_job_name, + output_path=output_path, + instance_type=instance_type, + instance_count=instance_count, + user_vpc_config=user_vpc_config, + session=self.session, + ) + processor.set_input_channels(eval_spec.algorithm_spec.input_channels) + processor.set_output_channels(eval_spec.algorithm_spec.output_channels) + + return processor + + def get_evaluation_inputs(self) -> Dict[str, Any]: + """Get the Processor's default input channels + + Get the Processor's default input channels from RegisteredModel's + evaluation_spec. 
+
+        Returns:
+            Dict[str, Any]: A dict of input channels.
+        """
+        warnings.warn(
+            "`.get_evaluation_inputs` is deprecated and will be removed in a future version, you can now use "
+            "`.model_recipe().default_inputs` instead.",
+            category=FutureWarning,
+        )
+
+        if not self.evaluation_spec:
+            raise ValueError(
+                "The provided registered model does not contain evaluation spec."
+            )
+        eval_spec = ModelRecipeSpec.model_validate(self.evaluation_spec)
+        inputs = eval_spec.inputs or []
+        res = {}
+
+        for item in inputs:
+            res[item.name] = item.input_uri if isinstance(item, UriInput) else item
+        return res
+
+    @classmethod
+    def _is_multiple_spec(cls, spec: Dict[str, Any]) -> bool:
+        return not ("AlgorithmSpec" in spec or "AlgorithmName" in spec)
+
+    def _get_evaluation_spec(self):
+        """Get the evaluation_spec of the registered model."""
+        return self.evaluation_spec
+
+    def training_recipe(self, method: Optional[str] = None) -> "ModelTrainingRecipe":
+        """Get the training recipe of the registered model.
+
+        Args:
+            method (str, optional): The training method used to select the
+                specific training recipe.
+
+        Returns:
+            :class:`pai.model.ModelTrainingRecipe`: A ModelTrainingRecipe object.
+
+        """
+        from ._model_recipe import ModelTrainingRecipe
+
+        return ModelTrainingRecipe(
+            model_name=self.model_name,
+            model_version=self.model_version,
+            model_provider=self.model_provider,
+            method=method,
+        )
+
+    def model_recipe(
+        self, recipe_type: "ModelRecipeType", method: Optional[str] = None
+    ) -> "ModelRecipe":
+        """Initialize a ModelRecipe object from the recipe spec of the registered model.
+
+        Args:
+            recipe_type (ModelRecipeType): The recipe type used to select the specific model recipe.
+                Supported recipe types are: "training", "evaluation", "compression".
+            method (str, optional): The method used to select the specific model recipe.
+
+        Returns:
+            :class:`pai.model.ModelRecipe`: A ModelRecipe object.
+ + """ + from ._model_recipe import ModelRecipe + + return ModelRecipe( + model_name=self.model_name, + model_version=self.model_version, + model_provider=self.model_provider, + recipe_type=recipe_type, + method=method, + ) diff --git a/pai/model/_model_recipe.py b/pai/model/_model_recipe.py new file mode 100644 index 0000000..62327bc --- /dev/null +++ b/pai/model/_model_recipe.py @@ -0,0 +1,781 @@ +# Copyright 2023 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import dataclasses +import enum +import shutil +from typing import Any, Dict, List, Optional, Tuple, Union + +from ..common.logging import get_logger +from ..common.oss_utils import download, is_oss_uri +from ..job._training_job import ( + DEFAULT_OUTPUT_MODEL_CHANNEL_NAME, + AlgorithmSpec, + Channel, + ComputeResource, + DatasetConfig, + ExperimentConfig, + HyperParameterDefinition, + InstanceSpec, + ModelRecipeSpec, + OssLocation, + ResourceType, + SpotSpec, + TrainingJob, + UriInput, + UserVpcConfig, + _TrainingJobSubmitter, +) +from ..predictor import Predictor +from ..session import get_default_session +from ._model import InferenceSpec, Model, RegisteredModel, ResourceConfig + +logger = get_logger(__name__) + + +@dataclasses.dataclass +class RecipeInitKwargs(object): + model_name: Optional[str] + model_version: Optional[str] + model_provider: Optional[str] + method: Optional[str] + # following fields are generated from model or overridden + model_channel_name: Optional[str] + model_uri: Optional[str] + hyperparameters: Optional[Dict[str, Any]] + hyperparameter_definitions: Optional[List[HyperParameterDefinition]] + job_type: Optional[str] + image_uri: Optional[str] + source_dir: Optional[str] + command: Union[str, List[str]] + resource_id: Optional[str] + instance_count: Optional[int] + instance_type: Optional[str] + instance_spec: Optional[InstanceSpec] + max_run_time: Optional[int] + labels: Optional[Dict[str, str]] + requirements: Optional[List[str]] + environments: Optional[Dict[str, str]] + input_channels: Optional[List[Channel]] + output_channels: Optional[List[Channel]] + default_inputs: Optional[Union[UriInput, DatasetConfig]] + customization: Optional[Dict[str, Any]] + supported_instance_types: Optional[List[str]] + + +class ModelRecipeType(enum.Enum): + TRAINING = "training" + EVALUATION = "evaluation" + COMPRESSION = "compression" + + @classmethod + def supported_types(cls): + return [cls.TRAINING, cls.EVALUATION, cls.COMPRESSION] + + +class 
ModelRecipe(_TrainingJobSubmitter): + MODEL_CHANNEL_NAME = "model" + + def __init__( + self, + model_name: Optional[str] = None, + model_version: Optional[str] = None, + model_provider: Optional[str] = None, + model_uri: Optional[str] = None, + recipe_type: ModelRecipeType = ModelRecipeType.TRAINING, + method: Optional[str] = None, + source_dir: Optional[str] = None, + model_channel_name: Optional[str] = "model", + hyperparameters: Optional[Dict[str, Any]] = None, + job_type: Optional[str] = None, + image_uri: Optional[str] = None, + command: Union[str, List[str]] = None, + instance_count: Optional[int] = None, + instance_type: Optional[str] = None, + instance_spec: Optional[InstanceSpec] = None, + resource_id: Optional[str] = None, + resource_type: Optional[Union[str, ResourceType]] = None, + spot_spec: Optional[SpotSpec] = None, + user_vpc_config: Optional[UserVpcConfig] = None, + labels: Optional[Dict[str, str]] = None, + requirements: Optional[List[str]] = None, + environments: Optional[Dict[str, str]] = None, + experiment_config: Optional[ExperimentConfig] = None, + input_channels: Optional[List[Channel]] = None, + output_channels: Optional[List[Channel]] = None, + max_run_time: Optional[int] = None, + default_inputs: Optional[Dict[str, Any]] = None, + base_job_name: Optional[str] = None, + supported_instance_type: Optional[List[str]] = None, + settings: Optional[Dict[str, Any]] = None, + ): + init_kwargs = self._init_kwargs( + model_name=model_name, + model_version=model_version, + model_provider=model_provider, + recipe_type=recipe_type, + method=method, + # get from model or override + model_uri=model_uri, + model_channel_name=model_channel_name, + hyperparameters=hyperparameters, + job_type=job_type, + image_uri=image_uri, + source_dir=source_dir, + command=command, + instance_count=instance_count, + instance_spec=instance_spec, + instance_type=instance_type, + labels=labels, + requirements=requirements, + environments=environments, + 
input_channels=input_channels, + output_channels=output_channels, + default_inputs=default_inputs, + max_run_time=max_run_time, + supported_instance_types=supported_instance_type, + ) + self.model_name = init_kwargs.model_name + self.model_version = init_kwargs.model_version + self.model_provider = init_kwargs.model_provider + self.method = init_kwargs.method + self.model_uri = init_kwargs.model_uri + self.model_channel_name = init_kwargs.model_channel_name + self.job_type = init_kwargs.job_type + self.hyperparameters = init_kwargs.hyperparameters + self.image_uri = init_kwargs.image_uri + self.command = init_kwargs.command + self.source_dir = init_kwargs.source_dir + self.default_inputs = init_kwargs.default_inputs + self.customization = init_kwargs.customization + self.supported_instance_types = init_kwargs.supported_instance_types + self.input_channels = init_kwargs.input_channels + self.output_channels = init_kwargs.output_channels + self.hyperparameter_definitions = init_kwargs.hyperparameter_definitions + + super().__init__( + resource_type=resource_type, + base_job_name=base_job_name, + experiment_config=experiment_config, + resource_id=resource_id, + user_vpc_config=user_vpc_config, + spot_spec=spot_spec, + instance_type=init_kwargs.instance_type, + instance_count=init_kwargs.instance_count, + instance_spec=init_kwargs.instance_spec, + max_run_time=init_kwargs.max_run_time, + environments=init_kwargs.environments, + requirements=init_kwargs.requirements, + labels=init_kwargs.labels, + settings=settings, + ) + + @classmethod + def _init_kwargs( + cls, + model_name: Optional[str] = None, + model_version: Optional[str] = None, + model_provider: Optional[str] = None, + recipe_type: ModelRecipeType = ModelRecipeType.TRAINING, + method: Optional[str] = None, + model_channel_name: Optional[str] = "model", + model_uri: Optional[str] = None, + hyperparameters: Optional[Dict[str, Any]] = None, + job_type: Optional[str] = None, + image_uri: Optional[str] = None, + 
source_dir: Optional[str] = None, + command: Union[str, List[str]] = None, + instance_count: Optional[int] = None, + instance_type: Optional[str] = None, + resource_id: Optional[str] = None, + instance_spec: Optional[InstanceSpec] = None, + max_run_time: Optional[int] = None, + labels: Optional[Dict[str, str]] = None, + requirements: Optional[List[str]] = None, + environments: Optional[Dict[str, str]] = None, + input_channels: List[Channel] = None, + output_channels: List[Channel] = None, + default_inputs: Optional[Union[UriInput, DatasetConfig]] = None, + supported_instance_types: Optional[List[str]] = None, + ) -> RecipeInitKwargs: + model = ( + RegisteredModel( + model_name=model_name, + model_version=model_version, + model_provider=model_provider, + ) + if model_name + else None + ) + model_recipe_spec = ( + model.get_recipe_spec(recipe_type=recipe_type, method=method) + if model + else None + ) + model_uri = model_uri or (model and model.uri) + customization = None + if not model_recipe_spec: + return RecipeInitKwargs( + model_name=model_name, + model_version=model_version, + model_provider=model_provider, + method=method, + model_channel_name=model_channel_name, + model_uri=model_uri, + hyperparameters=hyperparameters, + job_type=job_type, + image_uri=image_uri, + source_dir=source_dir, + command=command, + instance_count=instance_count, + instance_type=instance_type, + instance_spec=instance_spec, + resource_id=resource_id, + labels=labels, + requirements=requirements, + environments=environments, + input_channels=input_channels, + output_channels=output_channels, + max_run_time=max_run_time, + default_inputs=default_inputs, + customization=customization, + supported_instance_types=supported_instance_types, + hyperparameter_definitions=None, + ) + if not model_uri: + input_ = next( + ( + item + for item in model_recipe_spec.inputs + if item.name == model_channel_name + ), + None, + ) + + if input_: + if isinstance(input_, UriInput): + model_uri = 
input_.input_uri + else: + logger.warning( + "Input channel '%s' is not a URI input: %s", + model_channel_name, + type(input_), + ) + + if not default_inputs and model_recipe_spec.inputs: + default_inputs = {} + for item in model_recipe_spec.inputs: + if isinstance(item, UriInput): + default_inputs[item.name] = item.input_uri + else: + default_inputs[item.name] = item + algorithm_spec = cls._get_algorithm_spec(model_recipe_spec) + supported_instance_types = ( + supported_instance_types or model_recipe_spec.supported_instance_types + ) + hyperparameter_definitions = None + if algorithm_spec: + if ( + not source_dir + and algorithm_spec.code_dir + and isinstance(algorithm_spec.code_dir.location_value, OssLocation) + ): + oss_location = algorithm_spec.code_dir.location_value + if oss_location.endpoint: + source_dir = f"oss://{oss_location.bucket}.{oss_location.endpoint}/{oss_location.key.lstrip('/')}" + else: + source_dir = ( + f"oss://{oss_location.bucket}/{oss_location.key.lstrip('/')}" + ) + image_uri = image_uri or algorithm_spec.image + command = command or algorithm_spec.command + job_type = job_type or algorithm_spec.job_type + input_channels = input_channels or algorithm_spec.input_channels + output_channels = output_channels or algorithm_spec.output_channels + customization = algorithm_spec.customization + supported_instance_types = ( + supported_instance_types or algorithm_spec.supported_channel_types + ) + hyperparameter_definitions = algorithm_spec.hyperparameter_definitions + + instance_type, instance_spec, instance_count = cls._get_compute_resource_config( + instance_type=instance_type, + instance_spec=instance_spec, + instance_count=instance_count, + resource_id=resource_id, + compute_resource=model_recipe_spec.compute_resource, + supported_instance_types=supported_instance_types, + ) + hyperparameters = hyperparameters or {} + hyperparameters = { + **{ + hp.name: hp.default_value + for hp in ( + algorithm_spec and 
algorithm_spec.hyperparameter_definitions or {} + ) + if hp.default_value is not None and hp.default_value != "" + }, + **{hp.name: hp.value for hp in model_recipe_spec.hyperparameters}, + **hyperparameters, + } + requirements = requirements or model_recipe_spec.requirements + environments = environments or model_recipe_spec.environments + + return RecipeInitKwargs( + model_name=model_name, + model_version=model_version, + model_provider=model_provider, + method=method, + model_uri=model_uri, + model_channel_name=model_channel_name, + hyperparameters=hyperparameters, + job_type=job_type, + image_uri=image_uri, + source_dir=source_dir, + command=command, + instance_count=instance_count, + instance_spec=instance_spec, + instance_type=instance_type, + max_run_time=max_run_time, + labels=labels, + requirements=requirements, + environments=environments, + input_channels=input_channels, + output_channels=output_channels, + resource_id=resource_id, + default_inputs=default_inputs, + customization=customization, + supported_instance_types=supported_instance_types, + hyperparameter_definitions=hyperparameter_definitions, + ) + + @staticmethod + def _get_compute_resource_config( + instance_type: str, + instance_count: int, + instance_spec: InstanceSpec, + resource_id: str, + compute_resource: ComputeResource, + supported_instance_types: List[str], + ) -> Tuple[str, InstanceSpec, int]: + if resource_id: + if instance_type: + logger.warning( + "The instance type is ignored when resource_id is provided." + ) + instance_spec = instance_spec or ( + compute_resource and compute_resource.instance_spec + ) + if not instance_spec: + raise ValueError( + "Running in dedicated resource group, please provide instance spec" + " for the training job." + ) + instance_count = ( + instance_count + or (compute_resource and compute_resource.instance_count) + or 1 + ) + else: + if instance_spec: + logger.warning( + "The instance spec is ignored when resource_id is not provided." 
+ ) + instance_type = instance_type or ( + compute_resource and compute_resource.ecs_spec + ) + if not instance_type: + if not supported_instance_types: + raise ValueError( + "No instance type is specified for the training job" + ) + else: + instance_type = supported_instance_types[0] + instance_count = ( + instance_count or (compute_resource and compute_resource.ecs_count) or 1 + ) + return instance_type, instance_spec, instance_count + + @staticmethod + def _get_algorithm_spec(model_recipe_spec: ModelRecipeSpec) -> AlgorithmSpec: + session = get_default_session() + if model_recipe_spec.algorithm_spec: + return model_recipe_spec.algorithm_spec + + if not model_recipe_spec.algorithm_name: + raise ValueError( + "Both algorithm_name and algorithm_spec are not provided " + "in the model training spec." + ) + + algo = session.algorithm_api.get_by_name( + algorithm_name=model_recipe_spec.algorithm_name, + algorithm_provider=model_recipe_spec.algorithm_provider, + ) + raw_algo_version_spec = session.algorithm_api.get_version( + algorithm_id=algo["AlgorithmId"], + algorithm_version=model_recipe_spec.algorithm_version, + ) + return AlgorithmSpec.model_validate(raw_algo_version_spec["AlgorithmSpec"]) + + def _build_algorithm_spec( + self, code_input, inputs: Dict[str, Any] + ) -> AlgorithmSpec: + algorithm_spec = AlgorithmSpec( + command=( + self.command + if isinstance(self.command, list) + else ["sh", "-c", self.command] + ), + image=self.image_uri, + job_type=self.job_type, + code_dir=code_input, + output_channels=self.output_channels + or self._default_training_output_channels(), + input_channels=self.input_channels + or [ + Channel(name=channel_name, required=False) + for channel_name in inputs.keys() + ], + customization=self.customization, + ) + return algorithm_spec + + def retrieve_scripts(self, local_path: str) -> str: + """Retrieve the training scripts to the local file system. + + Args: + local_path (str): The local path where the training scripts are saved. 
+ + Returns: + str: The local path where the training scripts are saved. + + """ + + if not self.source_dir: + raise RuntimeError("Source code is not available for the training job.") + + if is_oss_uri(self.source_dir): + return download(self.source_dir, local_path, un_tar=True) + else: + shutil.copytree(self.source_dir, local_path) + return local_path + + def run( + self, + inputs: Optional[Dict[str, Union[str, DatasetConfig]]] = None, + outputs: Optional[Dict[str, Union[str, DatasetConfig]]] = None, + wait: bool = True, + job_name: Optional[str] = None, + show_logs: bool = True, + ) -> TrainingJob: + """Start a training job with the given inputs. + + Args: + inputs (Dict[str, Union[str, DatasetConfig]], optional): A dictionary of inputs + used in the training job. The keys are the channel name and the values are + the URIs of the input data. If not specified, the default inputs will be + used. + wait (bool): Whether to wait for the job to complete before returning. Default + to True. + job_name (str, optional): The name of the training job. If not provided, a default + job name will be generated. + show_logs (bool): Whether to show the logs of the training job. Default to True. + + Returns: + :class:`pai.training.TrainingJob`: A submitted training job. 
+ + """ + job_name = self.job_name(job_name) + + inputs = inputs or dict() + code_input = self._build_code_input(job_name, source_dir=self.source_dir) + algo_spec = self._build_algorithm_spec( + code_input=code_input, + inputs=inputs, + ) + + if self.model_channel_name not in inputs: + inputs[self.model_channel_name] = self.model_uri + + if len(inputs.keys()) == 1 and self.model_channel_name in inputs: + default_inputs = self.default_inputs + else: + default_inputs = None + + inputs = self.build_inputs( + inputs=inputs, + input_channels=algo_spec.input_channels, + default_inputs=default_inputs, + ) + outputs = self.build_outputs( + job_name=job_name, + output_channels=algo_spec.output_channels, + outputs=outputs, + ) + return self._submit( + job_name=job_name, + algorithm_spec=algo_spec, + instance_spec=self.instance_spec, + instance_type=self.instance_type, + instance_count=self.instance_count, + resource_id=self.resource_id, + hyperparameters=self.hyperparameters, + environments=self.environments, + requirements=self.requirements, + max_run_time=self.max_run_time, + inputs=inputs, + outputs=outputs, + user_vpc_config=self.user_vpc_config if self.user_vpc_config else None, + # experiment_config=self.experiment_config if self.experiment_config else None, + labels=self.labels, + wait=wait, + show_logs=show_logs, + ) + + +class ModelTrainingRecipe(ModelRecipe): + """A recipe used to train a model.""" + + def __init__( + self, + model_name: Optional[str] = None, + model_version: Optional[str] = None, + model_provider: Optional[str] = None, + model_uri: Optional[str] = None, + method: Optional[str] = None, + source_dir: Optional[str] = None, + model_channel_name: Optional[str] = "model", + hyperparameters: Optional[Dict[str, Any]] = None, + job_type: Optional[str] = None, + image_uri: Optional[str] = None, + command: Union[str, List[str]] = None, + instance_count: Optional[int] = None, + instance_type: Optional[str] = None, + spot_spec: Optional[SpotSpec] = None, + 
instance_spec: Optional[InstanceSpec] = None, + resource_id: Optional[str] = None, + resource_type: Optional[Union[str, ResourceType]] = None, + user_vpc_config: Optional[UserVpcConfig] = None, + labels: Optional[Dict[str, str]] = None, + requirements: Optional[List[str]] = None, + environments: Optional[Dict[str, str]] = None, + experiment_config: Optional[ExperimentConfig] = None, + input_channels: Optional[List[Channel]] = None, + output_channels: Optional[List[Channel]] = None, + max_run_time: Optional[int] = None, + default_training_inputs: Optional[Dict[str, Any]] = None, + base_job_name: Optional[str] = None, + **kwargs, + ): + """Initialize a ModelTrainingRecipe object. + + Args: + model_name (str, optional): The name of the registered model. Default to + None. + model_version (str, optional): The version of the registered model. Default + to None. + model_provider (str, optional): The provider of the registered model. + Optional values are "pai", "huggingface" or None. If None, list + registered models in the workspace of the current session. Default to + None. + method (str, optional): The training method used to select the + specific training recipe while the registered model contains multiple + model training specs. Default to None. + model_channel_name (str, optional): The name of the model channel. Default to + "model". + model_uri (str, optional): The URI of the input pretrained model. If the URI + is not provided, the model from the registered model will be used. + Default to None. + hyperparameters (dict, optional): A dictionary of hyperparameters used in + the training job. Default to None. + job_type (str, optional): The type of the job, supported values are "PyTorch", + "TfJob", "XGBoostJob" etc. + image_uri (str, optional): The URI of the Docker image. Default to None. + source_dir (str, optional): The source code using in the training job, which + is a directory containing the training script or an OSS URI. Default to + None. 
+ command (str or list, optional): The command to execute in the training job. + Default to None. + requirements (list, optional): A list of Python requirements used to install + the dependencies in the training job. Default to None. + instance_count (int, optional): The number of instances to use for training. + Default to None. + instance_type (str, optional): The instance type to use for training. Default + to None. + instance_spec (:class:`pai.model.InstanceSpec`, optional): The resource config + for each instance of the training job. The dedicated resource group must + be provided when the instance spec is set. Default to None. + resource_id (str, optional): The ID of the resource group used to run the + training job. Default to None. + spot_spec (:class:`pai.model.SpotSpec`, optional): The spot instance config + used to run the training job. If provided, spot instance will be used. + resource_type (str, optional): The resource type used to run the training job. + By default, general computing resource is used. If the resource_type is + 'Lingjun', Lingjun computing resource is used. + user_vpc_config (:class:`pai.model.UserVpcConfig`, optional): The VPC + configuration used to enable the job instance to connect to the + specified user VPC. Default to None. + environments (dict, optional): A dictionary of environment variables used in + the training job. Default to None. + experiment_config (:class:`pai.model.ExperimentConfig`, optional): The + experiment + labels (dict, optional): A dictionary of labels used to tag the training job. + Default to None. 
+ + """ + super().__init__( + model_name=model_name, + model_version=model_version, + model_provider=model_provider, + model_uri=model_uri, + method=method, + recipe_type=ModelRecipeType.TRAINING, + source_dir=source_dir, + model_channel_name=model_channel_name, + hyperparameters=hyperparameters, + job_type=job_type, + image_uri=image_uri, + command=command, + instance_count=instance_count, + instance_type=instance_type, + instance_spec=instance_spec, + resource_type=resource_type, + resource_id=resource_id, + spot_spec=spot_spec, + user_vpc_config=user_vpc_config, + labels=labels, + requirements=requirements, + environments=environments, + experiment_config=experiment_config, + input_channels=input_channels, + output_channels=output_channels, + max_run_time=max_run_time, + default_inputs=default_training_inputs, + base_job_name=base_job_name, + **kwargs, + ) + + def train( + self, + inputs: Optional[Dict[str, Union[str, DatasetConfig]]] = None, + outputs: Optional[Dict[str, Union[str, DatasetConfig]]] = None, + wait: bool = True, + job_name: Optional[str] = None, + show_logs: bool = True, + ) -> TrainingJob: + """Start a training job with the given inputs. + + Args: + inputs (Dict[str, Union[str, DatasetConfig]], optional): A dictionary of inputs + used in the training job. The keys are the channel name and the values are + the URIs of the input data. If not specified, the default inputs will be + used. + outputs (Dict[str, Union[str, DatasetConfig]], optional): A dictionary of outputs + used in the training job. The keys are the channel name and the values are + the URIs or Dataset of the output data. + wait (bool): Whether to wait for the job to complete before returning. Default + to True. + job_name (str, optional): The name of the training job. If not provided, a default + job name will be generated. + show_logs (bool): Whether to show the logs of the training job. Default to True. + Note that the logs will be shown only when the `wait` is set to True. 
+ + Returns: + :class:`pai.training.TrainingJob`: A submitted training job. + + """ + return self.run( + inputs=inputs, + outputs=outputs, + wait=wait, + job_name=job_name, + show_logs=show_logs, + ) + + def deploy( + self, + service_name: str, + instance_type: Optional[str] = None, + instance_count: int = 1, + resource_config: Optional[Union[ResourceConfig, Dict[str, int]]] = None, + resource_id: str = None, + options: Optional[Dict[str, Any]] = None, + wait=True, + inference_spec: Optional[InferenceSpec] = None, + **kwargs, + ) -> Predictor: + """Deploy the training job output model as a online prediction service. + + Args: + service_name (str): The name of the online prediction service. + instance_type (str, optional): The instance type used to run the service. + instance_count (int, optional): The number of instances used to run the + service. Default to 1. + resource_config (Union[ResourceConfig, Dict[str, int]], optional): The resource + config for the service. Default to None. + resource_id (str, optional): The ID of the resource group used to run the + service. Default to None. + options (Dict[str, Any], optional): The options used to deploy the service. + Default to None. + wait (bool, optional): Whether to wait for the service endpoint to be ready. + inference_spec (:class:`pai.model.InferenceSpec`, optional): The inference + spec used to deploy the service. If not provided, the `inference_spec` of + the model will be used. Default to None. + kwargs: Additional keyword arguments used to deploy the service. + + Returns: + :class:`pai.predictor.Predictor`: A predictor object refers to the created + service. 
+ """ + if not inference_spec and self.model_name: + model = RegisteredModel( + model_name=self.model_name, + model_version=self.model_version, + model_provider=self.model_provider, + ) + inference_spec = model.inference_spec + + if not inference_spec: + raise RuntimeError("No inference_spec is available for model deployment.") + + m = Model( + model_data=self.model_data(), + inference_spec=inference_spec, + ) + p = m.deploy( + service_name=service_name, + instance_type=instance_type, + instance_count=instance_count, + resource_config=resource_config, + resource_id=resource_id, + options=options, + wait=wait, + **kwargs, + ) + return p + + def model_data(self): + + if not self._training_jobs: + raise RuntimeError("No training job is available for deployment.") + + if not self.latest_job.is_succeeded(): + logger.warning( + "The latest training job is not succeeded, the deployment may not work." + ) + + return self.latest_job.output_path( + channel_name=DEFAULT_OUTPUT_MODEL_CHANNEL_NAME + ) diff --git a/pai/modelscope/estimator.py b/pai/modelscope/estimator.py index 94ba318..ffa2b96 100644 --- a/pai/modelscope/estimator.py +++ b/pai/modelscope/estimator.py @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging from typing import Any, Dict, List, Optional from ..api.image import ImageLabel +from ..common.logging import get_logger from ..common.utils import to_semantic_version from ..estimator import Estimator from ..session import Session -logger = logging.getLogger(__name__) +logger = get_logger(__name__) class ModelScopeEstimator(Estimator): @@ -196,6 +196,8 @@ def __init__( session=session, **kwargs, ) + # Check image_uri and modelscope_version + self.training_image_uri() def _validate_image_uri(self, image_uri: str, modelscope_version: str) -> None: """Check if image_uri or modelscope_version arguments are specified.""" @@ -219,10 +221,8 @@ def training_image_uri(self) -> str: labels = [ ImageLabel.OFFICIAL_LABEL, - ImageLabel.DLC_LABEL, - ImageLabel.PROVIDER_COMMUNITY_LABEL, + ImageLabel.DSW_LABEL, ImageLabel.DEVICE_TYPE_GPU, - ImageLabel.framework_version("PyTorch", "*"), ] # Filter images by ModelScope version @@ -255,10 +255,8 @@ def _get_supported_ms_versions_for_training(self) -> List[str]: label_keys = "system.framework.ModelScope" label_filter = [ ImageLabel.OFFICIAL_LABEL, - ImageLabel.DLC_LABEL, - ImageLabel.PROVIDER_COMMUNITY_LABEL, + ImageLabel.DSW_LABEL, ImageLabel.DEVICE_TYPE_GPU, - ImageLabel.framework_version("PyTorch", "*"), ImageLabel.framework_version("ModelScope", "*"), ] list_image_labels = self.session.image_api.list_labels( @@ -272,6 +270,7 @@ def _get_supported_ms_versions_for_training(self) -> List[str]: if label["Value"] not in res: res.append(label["Value"]) + res.sort(key=lambda x: to_semantic_version(x)) return res def _get_latest_ms_version_for_training(self) -> str: diff --git a/pai/modelscope/model.py b/pai/modelscope/model.py index 662ea90..246b31b 100644 --- a/pai/modelscope/model.py +++ b/pai/modelscope/model.py @@ -12,24 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging -import re from typing import Any, Dict, List, Optional, Union from ..api.image import ImageLabel +from ..common.logging import get_logger from ..common.utils import to_semantic_version -from ..model import ( +from ..model._model import ( DefaultServiceConfig, ModelBase, ResourceConfig, + StorageConfigBase, container_serving_spec, ) from ..serializers import SerializerBase from ..session import Session, get_default_session -logger = logging.getLogger(__name__) - -_PAI_MS_IMAGE_TAG_PATTERN_INFERENCE = re.compile(r"modelscope-inference:(\d.+)") +logger = get_logger(__name__) class ModelScopeModel(ModelBase): @@ -79,6 +77,7 @@ def __init__( requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, + storage_configs: Optional[List[StorageConfigBase]] = None, session: Optional[Session] = None, ): """Initialize a ModelScope Model. @@ -147,6 +146,9 @@ def __init__( health_check (Dict[str, Any], optional): The health check configuration. If it not set, A TCP readiness probe will be used to check the health of the Model server. + storage_configs (List[StorageConfigBase], optional): A list of storage configs + used to mount the storage to the container. The storage can be OSS, NFS, + SharedMemory, or NodeStorage, etc. session (:class:`pai.session.Session`, optional): A pai session object manages interactions with PAI REST API. 
@@ -171,10 +173,13 @@ def __init__( self.requirements = requirements self.requirements_path = requirements_path self.health_check = health_check + self.storage_configs = storage_configs super(ModelScopeModel, self).__init__( model_data=self.model_data, session=session, ) + # Check image_uri and modelscope_version + self.serving_image_uri() def _validate_args(self, image_uri: str, modelscope_version: str) -> None: """Check if image_uri or modelscope_version arguments are specified.""" @@ -184,7 +189,7 @@ def _validate_args(self, image_uri: str, modelscope_version: str) -> None: "Specify either modelscope_version or image_uri." ) - def serving_image_uri(self, instance_type: str) -> str: + def serving_image_uri(self) -> str: """Return the Docker image to use for serving. The :meth:`pai.modelscope.model.ModelScopeModel.deploy` method, that does the @@ -208,10 +213,13 @@ def serving_image_uri(self, instance_type: str) -> str: # Filter images by Transformers version if self.modelscope_version == "latest": latest_version = self._get_latest_ms_version_for_inference() - name = f"modelscope-inference:{latest_version}" + labels.append(ImageLabel.framework_version("ModelScope", latest_version)) else: - name = f"modelscope-inference:{self.modelscope_version}" + labels.append( + ImageLabel.framework_version("ModelScope", self.modelscope_version) + ) + name = "modelscope-inference:" list_images = self.session.image_api.list( name=name, labels=labels, @@ -236,21 +244,25 @@ def _get_supported_ms_versions_for_inference(self) -> List[str]: ImageLabel.EAS_LABEL, ImageLabel.PROVIDER_PAI_LABEL, ImageLabel.DEVICE_TYPE_GPU, + ImageLabel.framework_version("ModelScope", "*"), ] name = "modelscope-inference:" list_images = self.session.image_api.list( name=name, labels=labels, + verbose=True, workspace_id=0, ).items res = [] for image in list_images: - tag_match = _PAI_MS_IMAGE_TAG_PATTERN_INFERENCE.match(image["Name"]) - (modelscope_version,) = tag_match.groups() - if modelscope_version not 
in res: - res.append(modelscope_version) - + for label in image["Labels"]: + if ( + label["Key"] == "system.framework.ModelScope" + and label["Value"] not in res + ): + res.append(label["Value"]) + res.sort(key=lambda x: to_semantic_version(x)) return res def _get_latest_ms_version_for_inference(self) -> str: @@ -322,7 +334,7 @@ def deploy( :class:`pai.predictor.Predictor` : A PAI ``Predictor`` instance used for making prediction to the prediction service. """ - image_uri = self.serving_image_uri(instance_type=instance_type) + image_uri = self.serving_image_uri() self.inference_spec = container_serving_spec( command=self.command, image_uri=image_uri, @@ -334,6 +346,7 @@ def deploy( requirements_path=self.requirements_path, health_check=self.health_check, session=self.session, + storage_configs=self.storage_configs, ) return super(ModelScopeModel, self).deploy( service_name=service_name, @@ -344,4 +357,5 @@ def deploy( options=options, wait=wait, serializer=serializer, + **kwargs, ) diff --git a/pai/pipeline/component/_base.py b/pai/pipeline/component/_base.py index ad2668c..599206c 100644 --- a/pai/pipeline/component/_base.py +++ b/pai/pipeline/component/_base.py @@ -13,19 +13,19 @@ # limitations under the License. 
import itertools -import logging import uuid from abc import ABCMeta, abstractmethod import six -from pai.common.utils import random_str -from pai.common.yaml_utils import dump as yaml_dump -from pai.common.yaml_utils import dump_all as yaml_dump_all -from pai.pipeline.types import IO_TYPE_INPUTS, IO_TYPE_OUTPUTS, InputsSpec, OutputsSpec -from pai.session import get_default_session +from ...common.logging import get_logger +from ...common.utils import random_str +from ...common.yaml_utils import dump as yaml_dump +from ...common.yaml_utils import dump_all as yaml_dump_all +from ...session import get_default_session +from ..types import IO_TYPE_INPUTS, IO_TYPE_OUTPUTS, InputsSpec, OutputsSpec -logger = logging.getLogger(__name__) +logger = get_logger(__name__) DEFAULT_PIPELINE_API_VERSION = "core/v1" diff --git a/pai/pipeline/component/_container.py b/pai/pipeline/component/_container.py index bf196b5..491e545 100644 --- a/pai/pipeline/component/_container.py +++ b/pai/pipeline/component/_container.py @@ -16,22 +16,22 @@ from __future__ import print_function -import logging import uuid import six from deprecated import deprecated -from pai.common.yaml_utils import dump as yaml_dump -from pai.pipeline.component._base import UnRegisteredComponent -from pai.pipeline.types.variable import PipelineVariable -from pai.session import get_default_session +from ...common.logging import get_logger +from ...common.yaml_utils import dump as yaml_dump +from ...session import get_default_session +from ..types.variable import PipelineVariable +from ._base import UnRegisteredComponent PAI_MANIFEST_SPEC_INPUTS_ENV_KEY = "PAI_MANIFEST_SPEC_INPUTS" PAI_MANIFEST_SPEC_OUTPUTS_ENV_KEY = "PAI_MANIFEST_SPEC_OUTPUTS" PAI_INPUTS_PARAMETERS_ENV_KEY = "PAI_INPUTS_PARAMETERS" -_logger = logging.getLogger(__name__) +_logger = get_logger(__name__) @deprecated( diff --git a/pai/pipeline/core.py b/pai/pipeline/core.py index 653eebf..87932e6 100644 --- a/pai/pipeline/core.py +++ 
b/pai/pipeline/core.py @@ -14,22 +14,17 @@ from __future__ import absolute_import -import logging from collections import Counter, defaultdict -from pai.common.yaml_utils import dump as yaml_dump -from pai.common.yaml_utils import dump_all as yaml_dump_all -from pai.pipeline.component._base import UnRegisteredComponent -from pai.pipeline.types import ( - InputsSpec, - OutputsSpec, - PipelineParameter, - PipelineVariable, -) -from pai.pipeline.types.artifact import PipelineArtifact, PipelineArtifactElement -from pai.session import get_default_session - -logger = logging.getLogger(__name__) +from ..common.logging import get_logger +from ..common.yaml_utils import dump as yaml_dump +from ..common.yaml_utils import dump_all as yaml_dump_all +from ..session import get_default_session +from .component._base import UnRegisteredComponent +from .types import InputsSpec, OutputsSpec, PipelineParameter, PipelineVariable +from .types.artifact import PipelineArtifact, PipelineArtifactElement + +logger = get_logger(__name__) class Pipeline(UnRegisteredComponent): diff --git a/pai/pipeline/run.py b/pai/pipeline/run.py index c286a23..466d5a6 100644 --- a/pai/pipeline/run.py +++ b/pai/pipeline/run.py @@ -14,18 +14,18 @@ from __future__ import absolute_import -import logging import time from concurrent.futures import ThreadPoolExecutor from datetime import datetime from typing import Callable, Optional -from pai.api.base import PaginatedResult -from pai.exception import PAIException -from pai.pipeline.artifact import ArchivedArtifact -from pai.session import Session, get_default_session +from ..api.base import PaginatedResult +from ..common.logging import get_logger +from ..exception import PAIException +from ..session import Session, get_default_session +from .artifact import ArchivedArtifact -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # TODO: review the status names of the PipelineRun. 
diff --git a/pai/predictor.py b/pai/predictor.py index fdd06e5..f5d1a51 100644 --- a/pai/predictor.py +++ b/pai/predictor.py @@ -16,7 +16,6 @@ import base64 import functools import json -import logging import posixpath import time from abc import ABC, abstractmethod @@ -30,7 +29,8 @@ from .common.consts import FrameworkTypes from .common.docker_utils import ContainerRun -from .common.utils import http_user_agent +from .common.logging import get_logger +from .common.utils import http_user_agent, is_package_available from .exception import PredictionException from .serializers import ( JsonSerializer, @@ -40,10 +40,14 @@ ) from .session import Session, get_default_session -logger = logging.getLogger(__name__) +if is_package_available("openai"): + from openai import OpenAI + + +logger = get_logger(__name__) _PAI_SERVICE_CONSOLE_URI_PATTERN = ( - "https://pai.console.aliyun.com/?regionId={region_id}#" + "https://pai.console.aliyun.com/?regionId={region_id}&workspaceId={workspace_id}#" "/eas/serviceDetail/{service_name}/detail" ) @@ -73,7 +77,6 @@ def completed_status(cls): class EndpointType(object): - # Public Internet Endpoint INTERNET = "INTERNET" @@ -82,7 +85,6 @@ class EndpointType(object): class ServiceType(object): - Standard = "Standard" Async = "Async" @@ -179,21 +181,31 @@ def service_status(self): return self._service_api_object["Status"] @property - def access_token(self): + def access_token(self) -> str: """Access token of the service.""" return self._service_api_object["AccessToken"] + @property + def labels(self) -> Dict[str, str]: + """Labels of the service.""" + labels = { + item["LabelKey"]: item["LabelValue"] + for item in self._service_api_object.get("Labels", []) + } + return labels + @property def console_uri(self): """Returns the console URI of the service.""" return _PAI_SERVICE_CONSOLE_URI_PATTERN.format( + workspace_id=self.session.workspace_id, region_id=self.session.region_id, service_name=self.service_name, ) def 
_get_default_serializer(self): """Get default serializer for the predictor by inspecting the service config.""" - from pai.model import _BuiltinProcessor + from pai.model._model import _BuiltinProcessor service_config = json.loads(self._service_api_object["ServiceConfig"]) processor_code = service_config.get("processor") @@ -297,21 +309,79 @@ def delete_service(self): self.session.service_api.delete(name=self.service_name) def wait_for_ready(self): - """Wait until the service enter running status.""" + """Wait until the service enter running status. + + Raises: + RuntimeError: Raise if the service terminated unexpectedly. + + """ + if self.service_status == ServiceStatus.Running: + return + logger.info( "Service waiting for ready: service_name={}".format(self.service_name) ) unexpected_status = ServiceStatus.completed_status() unexpected_status.remove(ServiceStatus.Running) - type(self)._wait_for_status( service_name=self.service_name, status=ServiceStatus.Running, unexpected_status=unexpected_status, session=self.session, ) + + # hack: PAI-EAS gateway may not be ready when the service is ready. + self._wait_for_gateway_ready() self.refresh() + def wait(self): + """Wait for the service to be ready.""" + return self.wait_for_ready() + + def _wait_for_gateway_ready(self, attempts: int = 60, interval: int = 2): + """Hacky way to wait for the service gateway to be ready. + + Args: + attempts (int): Number of attempts to wait for the service gateway to be + ready. + interval (int): Interval between each attempt. + """ + + def _is_gateway_ready(): + # can't use HEAD method to check gateway status because the service will + # block the request until timeout. 
+ resp = self._send_request(method="GET") + logger.debug( + "Check gateway status result: status_code=%s content=%s", + resp.status_code, + resp.content, + ) + res = not ( + # following status code and content indicates the gateway is not ready + ( + resp.status_code == 503 + and (b"no healthy upstream" in resp.content or not resp.content) + ) + or (resp.status_code == 404 and not resp.content) + ) + return res + + err_count_threshold = 3 + err_count = 0 + while attempts > 0: + attempts -= 1 + try: + if _is_gateway_ready(): + break + except requests.exceptions.RequestException as e: + err_count += 1 + if err_count >= err_count_threshold: + logger.warning("Failed to check gateway status: %s", e) + break + time.sleep(interval) + else: + logger.warning("Timeout waiting for gateway to be ready.") + @classmethod def _wait_for_status( cls, @@ -661,6 +731,36 @@ def raw_predict( ) return resp + def openai(self, url_suffix: str = "v1", **kwargs) -> "OpenAI": + """Initialize an OpenAI client from the predictor. + + Only used for OpenAI API compatible services, such as Large Language Model + service from PAI QuickStart. + + Args: + url_suffix (str, optional): URL suffix that will be appended to the + EAS service endpoint to form the base URL for the OpenAI client. + (Default "v1"). + **kwargs: Additional keyword arguments for the OpenAI client. + + Returns: + OpenAI: An OpenAI client object. + """ + + if not is_package_available("openai"): + raise ImportError( + "openai package is not installed, install it with `pip install openai`." 
+ ) + + if url_suffix.startswith("/"): + default_base_url = posixpath.join(self.endpoint, url_suffix[1:]) + else: + default_base_url = posixpath.join(self.endpoint, url_suffix) + base_url = kwargs.pop("base_url", default_base_url) + api_key = kwargs.pop("api_key", self.access_token) + + return OpenAI(base_url=base_url, api_key=api_key, **kwargs) + class WaitConfig(object): """WaitConfig is used to set polling configurations for waiting for asynchronous @@ -1304,6 +1404,9 @@ def wait_for_ready(self): self._wait_local_server_ready() time.sleep(5) + def wait(self): + return self.wait_for_ready() + def _wait_local_server_ready( self, interval: int = 5, @@ -1327,6 +1430,6 @@ def _wait_local_server_ready( break except requests.ConnectionError: # ConnectionError means server is not ready. - logging.debug("Waiting for the container to be ready...") + logger.debug("Waiting for the container to be ready...") time.sleep(interval) continue diff --git a/pai/processor.py b/pai/processor.py new file mode 100644 index 0000000..1d4a524 --- /dev/null +++ b/pai/processor.py @@ -0,0 +1,332 @@ +# Copyright 2024 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os +from datetime import datetime +from typing import Any, Dict, List, Optional, Union + +from .common.consts import JobType, StoragePathCategory +from .common.logging import get_logger +from .common.utils import experimental, random_str, to_plain_text +from .job import ( + AlgorithmSpec, + Channel, + CodeDir, + ExperimentConfig, + SpotSpec, + TrainingJob, + UriOutput, + UserVpcConfig, + _TrainingJobSubmitter, +) +from .job._training_job import ResourceType +from .session import Session, get_default_session + +logger = get_logger(__name__) + + +@experimental +class Processor(_TrainingJobSubmitter): + def __init__( + self, + image_uri: str, + command: Union[str, List[str]], + source_dir: Optional[str] = None, + job_type: str = JobType.PyTorchJob, + parameters: Optional[Dict[str, Any]] = None, + environments: Optional[Dict[str, str]] = None, + requirements: Optional[List[str]] = None, + max_run_time: Optional[int] = None, + base_job_name: Optional[str] = None, + output_path: Optional[str] = None, + instance_type: Optional[str] = None, + spot_spec: Optional[SpotSpec] = None, + resource_type: Optional[Union[str, ResourceType]] = None, + instance_count: Optional[int] = None, + user_vpc_config: Optional[UserVpcConfig] = None, + experiment_config: Optional[ExperimentConfig] = None, + settings: Optional[Dict[str, Any]] = None, + labels: Optional[Dict[str, str]] = None, + session: Optional[Session] = None, + ): + """Processor constructor. + + Args: + image_uri (str): The image used in the job. It can be an image + provided by PAI or a user customized image. To view the images provided + by PAI, please refer to the document: + https://help.aliyun.com/document_detail/202834.htm. + command (Union[str, List[str]): The command used to run the job. + source_dir (str, optional): The local source code directory used in the + job. The directory will be packaged and uploaded to an OSS + bucket, then downloaded to the `/ml/usercode` directory in the + job container. 
If there is a `requirements.txt` file in the source code + directory, the corresponding dependencies will be installed before the + script runs. + + If 'git_config' is provided, 'source_dir' should be a relative location + to a directory in the Git repo. With the following GitHub repo directory + structure: + + .. code:: + + |----- README.md + |----- src + |----- train.py + |----- test.py + + if you need 'src' directory as the source code directory, you can assign + source_dir='./src/'. + job_type (str): The type of job, which can be TFJob, PyTorchJob, XGBoostJob, + etc. Default value is PyTorchJob. + parameters (dict, optional): A dictionary that represents the + parameters used in the job. The parameters will be + stored in the `/ml/input/config/hyperparameters.json` as a JSON + dictionary in the container. + environments: A dictionary that maps environment variable names to their values. + This optional field allows you to provide a set of environment variables that will be + applied to the context where the code is executed. + requirements (list, optional): An optional list of strings that specifies the Python + package dependencies with their versions. Each string in the list should be in the format + 'package' or 'package==version'. This is similar to the contents of a requirements.txt file used + in Python projects. If requirements.txt is provided in user code directory, requirements + will override the conflict dependencies directly. + max_run_time (int, optional): The maximum time in seconds that the + job can run. The job will be terminated after the time is + reached (Default None). + base_job_name (str, optional): The base name used to generate the + job name. + output_path (str, optional): An OSS URI to store the outputs of the + jobs. If not provided, an OSS URI will be generated using the default + OSS bucket in the session. 
When the `estimator.fit` method is called, + a specific OSS URI under the output_path for each channel is generated + and mounted to the container. + + A completed container directory structure example:: + + /ml + |-- usercode // User source code directory. + | |-- requirements.txt + | `-- train.py + |-- input // Job input + | `-- config + | |-- hyperparameters.json // Hyperparameters in JSON + | | // dictionary format for the + | | // Job + | | + | `-- data // Job input channels + | | // `/ml/input/data/` is a input + | | // channel, and the directory + | | // name is the channel name. + | | // Each directory under the + | |-- test-data + | | `-- test.csv + | `-- train-data + | `-- train.csv + `-- output // Job output channels. + | // Each directory under the + | // `/ml/output/` is an output + | // channel, and the directory + | // name is the channel name. + `-- model + `-- checkpoints + + instance_type (str): The machine instance type used to run the job. + To view the supported machine instance types, please refer to the + document: + https://help.aliyun.com/document_detail/171758.htm#section-55y-4tq-84y. + If the instance_type is "local", the job is executed locally + using docker. + instance_count (int): The number of machines used to run the job. + resource_type (str, optional): The resource type used to run the training job. + By default, general computing resource is used. If the resource_type is + 'Lingjun', Lingjun computing resource is used. + user_vpc_config (:class:`pai.estimator.UserVpcConfig`, optional): The VPC + configuration used to enable the job instance to connect to the + specified user VPC. If provided, an Elastic Network Interface (ENI) will + be created and attached to the job instance, allowing the + instance to access the resources within the specified VPC. Default to + None. 
+ experiment_config(:class:`pai.estimator.ExperimentConfig`, optional): The + experiment configuration used to construct the relationship between the + job and the experiment. If provided, the training job will belong to the + specified experiment, in which case the job will use artifact_uri of + settings (dict, optional): A dictionary that represents the additional settings + for job, such as AIMaster configurations. + experiment as default output path. Default to None. + labels (Dict[str, str], optional): A dictionary that maps label names to + their values. This optional field allows you to provide a set of labels + that will be applied to the training job. + session (Session, optional): A PAI session instance used for communicating + with PAI service. + + """ + self.image_uri = image_uri + self.command = command + self.source_dir = source_dir + self.job_type = job_type or JobType.PyTorchJob + self.parameters = parameters or dict() + self.session = session or get_default_session() + + self._input_channels = None + self._output_channels = None + super().__init__( + resource_type=resource_type, + spot_spec=spot_spec, + base_job_name=base_job_name, + output_path=output_path, + experiment_config=experiment_config, + instance_type=instance_type, + instance_count=instance_count or 1, + user_vpc_config=user_vpc_config, + max_run_time=max_run_time, + environments=environments, + requirements=requirements, + labels=labels, + settings=settings, + ) + + def run( + self, + inputs: Dict[str, Any] = None, + outputs: Dict[str, Any] = None, + wait: bool = True, + show_logs: bool = True, + ) -> TrainingJob: + """Submit a job with the given input and output channels. + + Args: + inputs (Dict[str, Any]): A dictionary representing the input data for the + job. Each key/value pair in the dictionary is an input channel, + the key is the channel name, and the value is the input data. 
The input + data can be an OSS URI or a NAS URI object and will be mounted to the + `/ml/input/data/{channel_name}` directory in the job container. + outputs (Dict[str, Any]): A dictionary representing the output data for the + job. Each key/value pair in the dictionary is an output channel, + the key is the channel name, and the value is the output path. The output + path can be an OSS URI or a NAS URI object and will be mounted to the + `/ml/outputs/data/{channel_name}` directory in the job container. + wait (bool): Specifies whether to block until the training job is completed, + either succeeded, failed, or stopped. (Default True). + show_logs (bool): Whether to show the logs of the job. Default to True. + Note that the logs will be shown only when the `wait` is set to True. + + Returns: + :class:`pai.job.TrainingJob`: A submitted training job. + + Raises: + UnExpectedStatusException: If the job fails. + + """ + inputs = inputs or dict() + outputs = outputs or dict() + job_name = self._gen_job_display_name() + + code_dest = Session.get_storage_path_by_category( + StoragePathCategory.ProcessingSrc, to_plain_text(job_name) + ) + code_dir = self._build_code_input(job_name, self.source_dir, code_dest) + algo_spec = self._build_algorithm_spec( + code_input=code_dir, + ) + inputs = self.build_inputs(inputs, input_channels=algo_spec.input_channels) + outputs = self.build_outputs( + job_name=job_name, + output_channels=algo_spec.output_channels, + outputs=outputs, + ) + + return self._submit( + instance_count=self.instance_count, + instance_type=self.instance_type, + job_name=job_name, + hyperparameters=self.parameters, + environments=self.environments, + requirements=self.requirements, + max_run_time=self.max_run_time, + inputs=inputs, + outputs=outputs, + algorithm_spec=algo_spec, + user_vpc_config=( + self.user_vpc_config.model_dump() if self.user_vpc_config else None + ), + experiment_config=( + self.experiment_config.model_dump() if self.experiment_config else 
None + ), + labels=self.labels, + wait=wait, + show_logs=show_logs, + ) + + def _gen_job_display_name(self, job_name=None): + """Generate job display name.""" + if job_name: + return job_name + ts = datetime.now().strftime("%Y%m%d_%H%M%S") + return "{}_{}".format(self.base_job_name or "processing_job", ts) + + def _build_algorithm_spec(self, code_input: CodeDir) -> AlgorithmSpec: + """Build a temporary AlgorithmSpec used for submitting the Job.""" + + algorithm_spec = AlgorithmSpec( + command=( + self.command + if isinstance(self.command, list) + else [ + "sh", + "-c", + self.command, + ] + ), + image=self.image_uri, + job_type=self.job_type, + code_dir=code_input, + input_channels=self._input_channels or [], + output_channels=self._output_channels or [], + ) + return algorithm_spec + + def _training_job_base_output(self, job_name: str) -> str: + """Generate the base output path for the job.""" + + bucket_name = self.session.oss_bucket.bucket_name + # replace non-alphanumeric character in job name. + job_name = to_plain_text(job_name) + + if self.output_path: + return os.path.join(self.output_path, f"{job_name}_{random_str(6)}") + + job_output_path = Session.get_storage_path_by_category( + StoragePathCategory.ProcessingJob, f"{job_name}_{random_str(6)}" + ) + return f"oss://{bucket_name}/{job_output_path}" + + def get_outputs_data(self) -> Dict[str, str]: + """Show all outputs data paths. + + Returns: + dict[str, str]: A dictionary of all outputs data paths. 
+ """ + if not self.latest_job: + raise RuntimeError("Current no Job for the processor.") + + return { + ch.name: ch.output_uri if isinstance(ch, UriOutput) else ch.dataset_id + for ch in self.latest_job.outputs + } + + def set_input_channels(self, channels: List[Channel]): + self._input_channels = channels + + def set_output_channels(self, channels: List[Channel]): + self._output_channels = channels diff --git a/pai/schema/base.py b/pai/schema/base.py deleted file mode 100644 index eb81c37..0000000 --- a/pai/schema/base.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2023 Alibaba, Inc. or its affiliates. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from marshmallow import EXCLUDE, Schema, fields, post_dump, pre_load - -from ..common.utils import camel_to_snake, snake_to_camel - - -class EntitySchema(Schema): - def __init__(self, instance=None, **kwargs): - super(EntitySchema, self).__init__(**kwargs) - self.instance = instance - - -class BaseAPIResourceSchema(Schema): - """Base schema using in API object serialization and deserialization.""" - - class Meta(object): - unknown = EXCLUDE - - _DefaultFieldsNameMapping = { - "GmtCreateTime": "create_time", - "GmtModifiedTime": "modified_time", - } - - # Mapping API object field name to Python Object/Schema field name.. 
- FieldNameMapping = {} - - def __init__(self, instance=None, session=None, **kwargs): - super(BaseAPIResourceSchema, self).__init__(**kwargs) - self.instance = instance - self.session = session - - @pre_load - def _filed_name_load_preprocess(self, data, **kwargs): - """Input API object preprocess. - - Transform the input data key to entity filed name. - """ - result = dict() - for name, value in data.items(): - if name in self.FieldNameMapping: - result[self.FieldNameMapping[name]] = value - else: - result[camel_to_snake(name)] = value - return result - - @post_dump - def _filed_name_dump_postprocess(self, data, **kwargs): - """Transform output field name to camel case.""" - filed_name_mapping = self._DefaultFieldsNameMapping.copy() - filed_name_mapping.update(self.FieldNameMapping) - - field_mapping_rev = {value: key for key, value in filed_name_mapping.items()} - result = dict() - for key, value in data.items(): - if value is None: - continue - if key in field_mapping_rev: - result[field_mapping_rev[key]] = value - else: - result[snake_to_camel(key)] = value - return result - - def make_or_reload(self, instance_cls, data): - """Make an instance or reload the instance.""" - if self.instance: - self.instance.__init__(**data) - return self.instance - else: - return instance_cls(session=self.session, **data) - - -class ListOfKVField(fields.Field): - """Mapping a List of key, value to a Dict.""" - - def _serialize(self, value, attr, obj, **kwargs): - res = [] - if not value: - return res - for k, v in value.items(): - res.append( - { - "Key": k, - "Value": v, - } - ) - return res - - def _deserialize(self, value, attr, data, **kwargs): - res = dict() - if not value: - return res - for item in value: - res[item["Key"]] = item["Value"] - return res diff --git a/pai/schema/training_job_schema.py b/pai/schema/training_job_schema.py deleted file mode 100644 index 0b1a28b..0000000 --- a/pai/schema/training_job_schema.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2023 
Alibaba, Inc. or its affiliates. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from marshmallow import fields, post_load - -from .base import BaseAPIResourceSchema, ListOfKVField - - -class HyperparameterField(fields.Field): - """Convert between hyperparameters in Dict to hyperparameters in API Object.""" - - def _serialize(self, value, attr, obj, **kwargs): - res = [] - if not value: - return res - for k, v in value.items(): - res.append( - { - "Name": k, - "Value": v, - } - ) - return res - - def _deserialize(self, value, attr, data, **kwargs): - res = dict() - if not value: - return res - for item in value: - res[item["Name"]] = item["Value"] - return res - - -class TrainingJobMetricSchema(BaseAPIResourceSchema): - name = fields.Str() - timestamp = fields.Str() - value = fields.Float() - - -class TrainingJobSchedulerSchema(BaseAPIResourceSchema): - max_running_time_in_seconds = fields.Int() - - -class TrainingJobChannelSchema(BaseAPIResourceSchema): - dataset_id = fields.Str() - input_uri = fields.Str() - name = fields.Str() - - -class TrainingJobStatusTransitionSchema(BaseAPIResourceSchema): - end_time = fields.Str() - reason_code = fields.Str() - reason_message = fields.Str() - start_time = fields.DateTime() - status = fields.Str() - - -class TrainingJobSchema(BaseAPIResourceSchema): - FieldNameMapping = { - "GmtCreateTime": "create_time", - "GmtModifiedTime": "modified_time", - "TrainingJobDescription": "description", - } - - algorithm_name = fields.Str() - 
algorithm_provider = fields.Str() - algorithm_version = fields.Str() - - hyperparameters = HyperparameterField() - input_channels = fields.List(fields.Dict) - output_channels = fields.List(fields.Dict) - labels = ListOfKVField() - description = fields.Str() - training_job_name = fields.Str() - scheduler = fields.Dict() - compute_resource = fields.Dict() - workspace_id = fields.Str() - - # load only fields - latest_metrics = fields.List(fields.Dict) - algorithm_id = fields.Str(load_only=True) - create_time = fields.DateTime(load_only=True) - modified_time = fields.DateTime(load_only=True) - reason_code = fields.Str() - reason_message = fields.Str() - status = fields.Str() - status_transitions = fields.List(fields.Dict) - training_job_id = fields.Str() - training_job_url = fields.Str() - - @post_load - def _make(self, data, **kwargs): - from pai.estimator import _TrainingJob - - data["instance_count"] = data.get("compute_resource", {}).get("EcsCount") - data["instance_type"] = data.get("compute_resource", {}).get("EcsType") - data["max_running_time_in_seconds"] = data.get("scheduler", {}).get( - "MaxRunningTimeInSeconds" - ) - - return self.make_or_reload(_TrainingJob, data) diff --git a/pai/serializers.py b/pai/serializers.py index d74c926..432f796 100644 --- a/pai/serializers.py +++ b/pai/serializers.py @@ -13,7 +13,6 @@ # limitations under the License. 
import json -import logging import urllib.request from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional, Tuple, Union @@ -26,9 +25,10 @@ from eas_prediction import pytorch_predict_pb2 as pt_pb from eas_prediction import tf_request_pb2 as tf_pb -from pai.session import Session, get_default_session +from .common.logging import get_logger +from .session import Session, get_default_session -logger = logging.getLogger(__name__) +logger = get_logger(__name__) def _is_pil_image(data) -> bool: diff --git a/pai/session.py b/pai/session.py index bb90a4d..b4d6640 100644 --- a/pai/session.py +++ b/pai/session.py @@ -15,22 +15,28 @@ from __future__ import absolute_import import json -import logging import os.path import posixpath from datetime import datetime -from typing import Any, Dict, Optional, Union +from typing import Any, Dict, Optional, Tuple, Union import oss2 +from alibabacloud_credentials.client import Client as CredentialClient +from alibabacloud_credentials.exceptions import CredentialException from alibabacloud_credentials.models import Config as CredentialConfig from alibabacloud_credentials.utils import auth_constant +from Tea.exceptions import TeaException from .api.api_container import ResourceAPIsContainerMixin -from .common.consts import DEFAULT_CONFIG_PATH +from .api.base import ServiceName +from .api.client_factory import ClientFactory +from .api.workspace import WorkspaceAPI, WorkspaceConfigKeys +from .common.consts import DEFAULT_CONFIG_PATH, PAI_VPC_ENDPOINT, Network +from .common.logging import get_logger from .common.oss_utils import CredentialProviderWrapper, OssUriObj from .common.utils import is_domain_connectable, make_list_resource_iterator -logger = logging.getLogger(__name__) +logger = get_logger(__name__) # Environment variable that indicates where the config path is located. # If it is not provided, "$HOME/.pai/config.json" is used as the default config path. 
@@ -38,7 +44,6 @@ INNER_REGION_IDS = ["center"] - # Global default session used by the program. _default_session = None @@ -60,6 +65,7 @@ def setup_default_session( oss_bucket_name: Optional[str] = None, oss_endpoint: Optional[str] = None, workspace_id: Optional[Union[str, int]] = None, + network: Optional[Union[str, Network]] = None, **kwargs, ) -> "Session": """Set up the default session used in the program. @@ -81,6 +87,11 @@ def setup_default_session( oss_bucket_name (str, optional): The name of the OSS bucket used in the session. oss_endpoint (str, optional): The endpoint for the OSS bucket. + network (Union[str, Network], optional): The network to use for the connection. + supported values are "VPC" and "PUBLIC". If provided, this value will be used as-is. + Otherwise, the code will first check for an environment variable PAI_NETWORK_TYPE. + If that is not set and the VPC endpoint is available, it will be used. + As a last resort, if all else fails, the PUBLIC endpoint will be used. 
**kwargs: Returns: @@ -108,12 +119,13 @@ def setup_default_session( # override the config from default session default_session = get_default_session() - - region_id = region_id or default_session.region_id - workspace_id = workspace_id or default_session.workspace_id - oss_bucket_name = oss_bucket_name or default_session.oss_bucket_name - oss_endpoint = oss_endpoint or default_session.oss_endpoint - credential_config = credential_config or default_session.credential_config + if default_session: + region_id = region_id or default_session.region_id + workspace_id = workspace_id or default_session.workspace_id + oss_bucket_name = oss_bucket_name or default_session.oss_bucket_name + oss_endpoint = oss_endpoint or default_session.oss_endpoint + credential_config = credential_config or default_session.credential_config + network = network or default_session.network session = Session( region_id=region_id, @@ -121,6 +133,7 @@ def setup_default_session( oss_bucket_name=oss_bucket_name, oss_endpoint=oss_endpoint, workspace_id=workspace_id, + network=network, **kwargs, ) @@ -142,16 +155,85 @@ def get_default_session() -> "Session": global _default_session if not _default_session: config = load_default_config_file() - if not config: - return - _default_session = Session(**config) + if config: + _default_session = Session(**config) + else: + _default_session = _init_default_session_from_env() return _default_session -def load_default_config_file() -> Optional[Dict[str, Any]]: - """Read config file""" +def _init_default_session_from_env() -> Optional["Session"]: + credential_client = Session._get_default_credential_client() + if not credential_client: + logger.debug("Not found credential from default credential provider chain.") + return + + region_id = _get_current_region_id() + if not region_id: + logger.debug( + "No region id found(env var: REGION or dsw_region), skip init default session" + ) + return + + network = ( + Network.VPC + if is_domain_connectable( + 
PAI_VPC_ENDPOINT.format(region_id), + timeout=1, + ) + else Network.PUBLIC + ) + + workspace_id = _get_current_workspace_id() + + dsw_instance_id = _get_dsw_instance_id() + if _is_running_in_dsw(): + if dsw_instance_id and not workspace_id: + logger.debug("Getting workspace id by dsw instance id: %s", dsw_instance_id) + workspace_id = Session._get_workspace_id_by_dsw_instance_id( + dsw_instance_id=dsw_instance_id, + cred=credential_client, + region_id=region_id, + network=network, + ) + if not workspace_id: + logger.warning( + "Failed to get workspace id by dsw instance id: %s", dsw_instance_id + ) + return + elif _is_running_in_dlc(): + pass + else: + logger.debug( + "No dsw instance id (env var: DSW_INSTANCE_ID) found, skip init default session" + ) + return + + bucket_name, oss_endpoint = Session.get_default_oss_storage( + workspace_id, credential_client, region_id, network + ) + + if not bucket_name: + logger.warning( + "Default OSS storage is not configured for the workspace: %s", workspace_id + ) + + sess = Session( + region_id=region_id, + workspace_id=workspace_id, + credential_config=None, + oss_bucket_name=bucket_name, + oss_endpoint=oss_endpoint, + network=network, + ) + + return sess + - config_path = DEFAULT_CONFIG_PATH +def load_default_config_file( + config_path: Optional[str] = DEFAULT_CONFIG_PATH, +) -> Optional[Dict[str, Any]]: + """Read config file""" if not os.path.exists(config_path): return @@ -172,6 +254,36 @@ def load_default_config_file() -> Optional[Dict[str, Any]]: return config +def _is_running_in_dlc() -> bool: + return os.environ.get("DLC_JOB_ID") is not None + + +def _is_running_in_dsw() -> bool: + return os.environ.get("DSW_INSTANCE_ID") is not None + + +def _get_current_region_id() -> str: + # legacy region id env var in DSW + region_id = os.getenv("dsw_region") + region_id = os.getenv("REGION", region_id) + return region_id + + +def _get_current_workspace_id() -> str: + # legacy workspace id + workspace_id = 
os.getenv("PAI_AI_WORKSPACE_ID") + workspace_id = os.getenv("PAI_WORKSPACE_ID", workspace_id) + return workspace_id + + +def _get_dlc_job_id() -> str: + return os.environ.get("DLC_JOB_ID") + + +def _get_dsw_instance_id() -> str: + return os.environ.get("DSW_INSTANCE_ID") + + class Session(ResourceAPIsContainerMixin): """A class responsible for communicating with PAI services.""" @@ -203,12 +315,18 @@ def __init__( self._credential_config = credential_config self._region_id = region_id - self._workspace_id = workspace_id + self._workspace_id = str(workspace_id) self._oss_bucket_name = oss_bucket_name self._oss_endpoint = oss_endpoint header = kwargs.pop("header", None) - super(Session, self).__init__(header=header) + network = kwargs.pop("network", None) + runtime = kwargs.pop("runtime", None) + if kwargs: + logger.warning( + "Unused arguments found in session initialization: %s", kwargs + ) + super(Session, self).__init__(header=header, network=network, runtime=runtime) @property def region_id(self) -> str: @@ -437,3 +555,66 @@ def is_gpu_inference_instance(self, instance_type: str) -> bool: "Please provide a supported instance type." ) return bool(spec["GPU"]) + + @staticmethod + def get_default_oss_storage( + workspace_id: str, cred: CredentialClient, region_id: str, network: Network + ) -> Tuple[Optional[str], Optional[str]]: + acs_ws_client = ClientFactory.create_client( + service_name=ServiceName.PAI_WORKSPACE, + credential_client=cred, + region_id=region_id, + network=network, + ) + workspace_api = WorkspaceAPI( + acs_client=acs_ws_client, + ) + resp = workspace_api.list_configs( + workspace_id=workspace_id, + config_keys=WorkspaceConfigKeys.DEFAULT_OSS_STORAGE_URI, + ) + oss_storage_uri = next( + ( + item["ConfigValue"] + for item in resp["Configs"] + if item["ConfigKey"] == WorkspaceConfigKeys.DEFAULT_OSS_STORAGE_URI + ), + None, + ) + + # Default OSS storage uri is not set. 
+ if not oss_storage_uri: + return None, None + uri_obj = OssUriObj(oss_storage_uri) + if network == Network.VPC: + endpoint = "oss-{}-internal.aliyuncs.com".format(region_id) + else: + endpoint = "oss-{}.aliyuncs.com".format(region_id) + return uri_obj.bucket_name, endpoint + + @staticmethod + def _get_default_credential_client() -> Optional[CredentialClient]: + try: + # Initialize the credential client with default credential chain. + # see: https://help.aliyun.com/zh/sdk/developer-reference/v2-manage-python-access-credentials#3ca299f04bw3c + return CredentialClient() + except CredentialException: + return + + @staticmethod + def _get_workspace_id_by_dsw_instance_id( + dsw_instance_id: str, cred: CredentialClient, region_id: str, network: Network + ) -> Optional[str]: + """Get workspace id by dsw instance id""" + dsw_client = ClientFactory.create_client( + service_name=ServiceName.PAI_DSW, + credential_client=cred, + region_id=region_id, + network=network, + ) + try: + resp = dsw_client.get_instance(dsw_instance_id) + return resp.body.workspace_id + except TeaException as e: + logger.warning("Failed to get instance info by dsw instance id: %s", e) + return diff --git a/pai/tensorboard.py b/pai/tensorboard.py index 483a78b..2d32c5c 100644 --- a/pai/tensorboard.py +++ b/pai/tensorboard.py @@ -52,7 +52,7 @@ class TensorBoard(object): def __init__(self, tensorboard_id: str, session: Optional[Session] = None): self.session = session or get_default_session() self.tensorboard_id = tensorboard_id - self._api_object = session.tensorboard_api.get(tensorboard_id) + self._api_object = self.session.tensorboard_api.get(tensorboard_id) def __repr__(self): return "TensorBoard(tensorboard_id={}, name={}, status={})".format( diff --git a/pai/toolkit/config.py b/pai/toolkit/config.py index e4e5b12..a03465a 100644 --- a/pai/toolkit/config.py +++ b/pai/toolkit/config.py @@ -13,19 +13,29 @@ # limitations under the License. 
import locale -import logging import os.path +from enum import Enum from typing import Any, Dict, List, Optional import oss2 from alibabacloud_credentials.client import Client as CredentialClient from alibabacloud_credentials.exceptions import CredentialException from alibabacloud_credentials.models import Config as CredentialConfig +from alibabacloud_credentials.providers import ( + CredentialsUriProvider, + EcsRamRoleCredentialProvider, + EnvironmentVariableCredentialsProvider, + OIDCRoleArnCredentialProvider, + ProfileCredentialsProvider, + RamRoleArnCredentialProvider, + RsaKeyPairCredentialProvider, +) from alibabacloud_credentials.utils import auth_constant from oss2.models import SimplifiedBucketInfo from prompt_toolkit import prompt from prompt_toolkit.validation import Validator +from ..common.logging import get_logger from ..common.oss_utils import OssUriObj from ..common.utils import ( is_domain_connectable, @@ -48,7 +58,7 @@ validate_bucket_name, ) -logger = logging.getLogger(__name__) +logger = get_logger(__name__) DEFAULT_CONFIG_PATH = os.path.join(os.path.expanduser("~"), ".pai", "config.json") DEFAULT_CREDENTIAL_INI_PATH = os.path.join( @@ -81,6 +91,67 @@ def _get_default_credential_client() -> Optional[CredentialClient]: logger.debug("Not found credential from default credential provider chain.") +class CredentialProviderType(Enum): + EnvironmentVariable = EnvironmentVariableCredentialsProvider + OIDCRoleArn = OIDCRoleArnCredentialProvider + EcsRamRole = EcsRamRoleCredentialProvider + RamRoleArn = RamRoleArnCredentialProvider + RsaKeyPair = RsaKeyPairCredentialProvider + Profile = ProfileCredentialsProvider + CredentialUri = CredentialsUriProvider + + @classmethod + def get_current_provider(cls) -> Optional["CredentialProviderType"]: + from alibabacloud_credentials.providers import DefaultCredentialsProvider + + d = {t.value: t for t in cls} + provider = DefaultCredentialsProvider() + for p in provider.user_configuration_providers: + if 
p.get_credentials(): + return d.get(p.__class__) + + @classmethod + def credential_hint(cls, cred_type: Optional["CredentialProviderType"]) -> str: + provider_hints = { + CredentialProviderType.EnvironmentVariable: localized_text( + "The credential source is: Environment Variable", + "凭证来源: 环境变量(ALIBABACLOUD_ACCESS_KEY_ID, ALIBABACLOUD_ACCESS_KEY_SECRET)", + ), + CredentialProviderType.OIDCRoleArn: localized_text( + "The credential source is: OIDC Role Arn", + "凭证来源: OIDC Role Arn", + ), + CredentialProviderType.EcsRamRole: localized_text( + "The credential source is: ECS Ram Role", + "凭证来源: ECS Ram Role", + ), + CredentialProviderType.RamRoleArn: localized_text( + "The credential source is: Ram Role Arn", + "凭证来源: Ram Role Arn", + ), + CredentialProviderType.RsaKeyPair: localized_text( + "The credential source is: RSA Key Pair", + "凭证来源: RSA Key Pair", + ), + CredentialProviderType.Profile: localized_text( + "The credential source is: Profile", + "凭证来源: Profile(~/.alibabacloud/credentials.ini)", + ), + CredentialProviderType.CredentialUri: localized_text( + "The credential source is: CredentialUri (EnvironmentVairbale ALIBABA_CLOUD_CREDENTIALS_URI)", + "凭证来源: CredentialUri (环境变量 ALIBABA_CLOUD_CREDENTIALS_URI)", + ), + } + + return provider_hints.get( + cred_type, + localized_text( + "The credential source is: Unknown", + "凭证来源: 未知", + ), + ) + + def prompt_for_credential(): default_credential_client = _get_default_credential_client() if not default_credential_client: @@ -121,12 +192,19 @@ def prompt_for_credential(): ) credential_client = CredentialClient(config=credential_config) else: + # Credential chain documentation: + # https://help.aliyun.com/zh/sdk/developer-reference/v2-manage-python-access-credentials print( localized_text( - "Use credential from default credential provider chain.", - "使用默认的凭证链获取密钥.", + "Use credential from default credential provider chain:", + "使用默认的凭证链获取访问密钥:", ) ) + + credential_source_hint = CredentialProviderType.credential_hint( + 
CredentialProviderType.get_current_provider() + ) + print(credential_source_hint) credential_client = default_credential_client credential_config = None @@ -175,6 +253,12 @@ def prompt_for_region(): for key in REGION_ID_ENV_KEYS: region_id = os.environ.get(key) if region_id: + print( + localized_text( + f"Config RegionId from environment variable({key}): {region_id} ", + f"从环境变量({key})中获取RegionId: {region_id}", + ) + ) return region_id region_name_map = {r["regionId"]: r["regionName"] for r in REGION_INFOS} @@ -308,7 +392,7 @@ def workspace_choice_name(workspace: Dict[str, Any]): def prompt_for_oss_bucket(user_profile: UserProfile, workspace_id: str): - default_storage_uri = user_profile.get_default_oss_storage_uri( + default_storage_uri, endpoint = user_profile.get_default_oss_storage_uri( workspace_id=workspace_id ) print( @@ -348,7 +432,6 @@ def prompt_for_oss_bucket(user_profile: UserProfile, workspace_id: str): ) bucket_name = prompt_for_create_oss_bucket(user_profile, workspace_id) else: - buckets: List[SimplifiedBucketInfo] = user_profile.list_oss_buckets() index = radio_list_prompt( localized_text( "Please select the OSS Bucket you want to use:", @@ -359,24 +442,48 @@ def prompt_for_oss_bucket(user_profile: UserProfile, workspace_id: str): ) bucket_name = buckets[index].name - bucket_info = user_profile.get_bucket_info(bucket_name) + try: + bucket_info = user_profile.get_bucket_info(bucket_name=bucket_name) + except oss2.exceptions.AccessDenied: + # try to get bucket info with ListBuckets API if the user has no permission to + # GetBucketInfo API. 
+ buckets = user_profile.list_oss_buckets(prefix=bucket_name) + bucket_info = next((b for b in buckets if b.name == bucket_name), None) + + if not bucket_info: + print_warning( + localized_text( + "Failed to get bucket info, use default endpoint.", + "获取 Bucket 信息失败,使用默认 Endpoint。", + ) + ) + region_id = user_profile.region_id + extranet_endpoint, intranet_endpoint = ( + f"oss-{region_id}.aliyuncs.com", + f"oss-{region_id}-internal.aliyuncs.com", + ) + else: + extranet_endpoint, intranet_endpoint = ( + bucket_info.extranet_endpoint, + bucket_info.intranet_endpoint, + ) # If Workspace has no default OSS storage URI and user has permission to edit, # prompt to set the default OSS storage URI. if not default_storage_uri and user_profile.has_permission_edit_config( workspace_id=workspace_id ): - prompt_for_set_default_oss_storage(user_profile, workspace_id, bucket_info) + prompt_for_set_default_oss_storage( + user_profile, workspace_id, bucket_name, intranet_endpoint=intranet_endpoint + ) row_format = "{:<60}{}" - intra_endpoint_connectable = is_domain_connectable( - bucket_info.intranet_endpoint, timeout=1 - ) + intra_endpoint_connectable = is_domain_connectable(intranet_endpoint, timeout=1) candidates = [ ( - bucket_info.intranet_endpoint, + intranet_endpoint, row_format.format( - bucket_info.intranet_endpoint, + intranet_endpoint, localized_text( "Internal endpoint (Please use in PAI-DSW Notebook, ECS and other " "intranet environment)", @@ -385,9 +492,9 @@ def prompt_for_oss_bucket(user_profile: UserProfile, workspace_id: str): ), ), ( - bucket_info.extranet_endpoint, + extranet_endpoint, row_format.format( - bucket_info.extranet_endpoint, + extranet_endpoint, localized_text( "Public endpoint", "外网Endpoint", @@ -411,7 +518,10 @@ def prompt_for_oss_bucket(user_profile: UserProfile, workspace_id: str): def prompt_for_set_default_oss_storage( - user_profile: UserProfile, workspace_id: str, bucket_info + user_profile: UserProfile, + workspace_id: str, + bucket_name: 
str, + intranet_endpoint: str, ): yes_no = confirm( localized_text( @@ -420,7 +530,9 @@ def prompt_for_set_default_oss_storage( ) ) if yes_no: - user_profile.set_default_oss_storage(workspace_id, bucket_info) + user_profile.set_default_oss_storage( + workspace_id, bucket_name, intranet_endpoint=intranet_endpoint + ) def prompt_for_create_oss_bucket(user_profile: UserProfile, workspace_id): @@ -522,6 +634,62 @@ def prompt_for_config_writing( ) +def prompt_config_with_default_dsw_role(user_profile: UserProfile): + print( + localized_text( + "The current DSW instance is bound to the default PAI DSW role," + " automatically utilizes the instance's workspace and OSS Bucket configurations.", + "当前DSW实例绑定PAI DSW默认角色,自动使用实例的工作空间和OSS Bucket配置", + ) + ) + instance_id = os.environ.get("DSW_INSTANCE_ID") + if not instance_id: + raise RuntimeError( + "Not found PAI DSW instance id from environment variable: DSW_INSTANCE_ID" + ) + instance_info = user_profile.get_instance_info(instance_id=instance_id) + workspace_id = instance_info["WorkspaceId"] + workspace_name = instance_info["WorkspaceName"] + user_id = instance_info["UserId"] + roles = user_profile.get_roles_in_workspace(workspace_id, user_id) + role_info = ", ".join(roles) + print_highlight( + localized_text( + "Current workspace configuration:", + "当前实例的工作空间信息:", + ) + ) + print_highlight( + "WorkspaceName: {}\nWorkspaceId: {}\nRoles: {}".format( + workspace_name, + workspace_id, + role_info, + ) + ) + + default_storage_uri, endpoint = user_profile.get_default_oss_storage_uri( + workspace_id=workspace_id, + ) + + if not default_storage_uri: + print_warning( + localized_text( + "WARNING: The STS credential generated by the default ROLE only support accessing " + "the default OSS Bucket storage of the workspace.\n" + "It is not configured for the current workspace, please " + "reference the document to configure the default OSS Bucket storage: \n" + 
"https://help.aliyun.com/zh/pai/user-guide/manage-workspaces#section-afd-ntr-nwh", + '警告:默认角色产生的STS凭证仅支持访问"工作空间默认存储"的OSS Bucket。\n' + "当前工作空间没有配置默认OSS Bucket存储,请参考帮助文档进行配置:\n" + "https://help.aliyun.com/zh/pai/user-guide/manage-workspaces#section-afd-ntr-nwh", + ) + ) + bucket_name, endpoint = None, None + else: + bucket_name = OssUriObj(default_storage_uri).bucket_name + return workspace_id, bucket_name, endpoint + + def run(): """ The flow of pai config. @@ -542,12 +710,19 @@ def run(): # Ask for Account profile user_profile = prompt_for_credential() - # Ask for workspace - workspace_id = prompt_for_workspace(user_profile=user_profile) - # Ask for OSS Bucket - bucket_name, bucket_endpoint = prompt_for_oss_bucket( - user_profile=user_profile, workspace_id=workspace_id - ) + if user_profile.is_dsw_default_role(): + ( + workspace_id, + bucket_name, + bucket_endpoint, + ) = prompt_config_with_default_dsw_role(user_profile=user_profile) + else: + # Ask for workspace + workspace_id = prompt_for_workspace(user_profile=user_profile) + # Ask for OSS Bucket + bucket_name, bucket_endpoint = prompt_for_oss_bucket( + user_profile=user_profile, workspace_id=workspace_id + ) # Ask for config writing prompt_for_config_writing( diff --git a/pai/toolkit/helper/utils.py b/pai/toolkit/helper/utils.py index 881b54f..69ef10d 100644 --- a/pai/toolkit/helper/utils.py +++ b/pai/toolkit/helper/utils.py @@ -13,10 +13,9 @@ # limitations under the License. 
import locale -import logging import os import re -from typing import List +from typing import Any, Dict, List, Optional, Tuple import oss2 from alibabacloud_credentials.client import Client as CredentialClient @@ -26,7 +25,7 @@ GetCallerIdentityResponseBody as CallerIdentity, ) from alibabacloud_tea_openapi import models as open_api_models -from oss2.models import SimplifiedBucketInfo +from oss2.models import BucketInfo, SimplifiedBucketInfo from prompt_toolkit import Application from prompt_toolkit.key_binding import KeyBindings, merge_key_bindings from prompt_toolkit.key_binding.defaults import load_key_bindings @@ -37,16 +36,27 @@ from ...api.base import ServiceName from ...api.client_factory import ClientFactory from ...api.workspace import WorkspaceAPI, WorkspaceConfigKeys +from ...common.consts import DEFAULT_NETWORK_TYPE, PAI_VPC_ENDPOINT, Network +from ...common.logging import get_logger from ...common.oss_utils import CredentialProviderWrapper, OssUriObj -from ...common.utils import make_list_resource_iterator +from ...common.utils import is_domain_connectable, make_list_resource_iterator +from ...libs.alibabacloud_pai_dsw20220101.client import Client as DswClient +from ...session import Session -logger = logging.getLogger(__name__) +logger = get_logger(__name__) locale_code, _ = locale.getdefaultlocale() OSS_NAME_PATTERN = re.compile(pattern="^[a-z0-9][a-z0-9-]{1,61}[a-z0-9]$") ZH_CN_LOCAL = "zh_CN" +# RoleARN pattern for AssumedRole CallerIdentity +ASSUMED_ROLE_ARN_PATTERN = re.compile(r"acs:ram::\d+:assumed-role/([^/]+)/.*") + +# DSW Notebook Default Role Name: +PAI_DSW_DEFAULT_ROLE_NAME = "aliyunpaidswdefaultrole" + + DEFAULT_PRODUCT_RAM_ROLE_NAMES = [ "AliyunODPSPAIDefaultRole", "AliyunPAIAccessingOSSRole", @@ -96,6 +106,15 @@ def __init__( ): self.region_id = region_id self.credential_config = credential_config + + if DEFAULT_NETWORK_TYPE: + self.network = Network.from_string(DEFAULT_NETWORK_TYPE) + else: + self.network = ( + Network.VPC + if 
is_domain_connectable(PAI_VPC_ENDPOINT.format(self.region_id)) + else Network.PUBLIC + ) self._caller_identify = self._get_caller_identity() def _get_credential_client(self): @@ -119,12 +138,35 @@ def _get_caller_identity(self) -> CallerIdentity: config=open_api_models.Config( credential=self._get_credential_client(), region_id=self.region_id, + network=( + None + if self.network == Network.PUBLIC + else self.network.value.lower() + ), ) ) .get_caller_identity() .body ) + def is_dsw_default_role(self) -> bool: + if self._caller_identify.identity_type != CallerIdentityType.AssumedRoleUser: + return False + m = ASSUMED_ROLE_ARN_PATTERN.match(self._caller_identify.arn) + return m and m.group(1).lower() == PAI_DSW_DEFAULT_ROLE_NAME + + def get_acs_dsw_client(self) -> DswClient: + return ClientFactory.create_client( + service_name=ServiceName.PAI_DSW, + credential_client=self._get_credential_client(), + region_id=self.region_id, + network=self.network, + ) + + def get_instance_info(self, instance_id: str) -> Dict[str, Any]: + dsw_client = self.get_acs_dsw_client() + return dsw_client.get_instance(instance_id).body.to_map() + def get_credential(self): return self._credential_client.get_access_key_id() @@ -153,7 +195,7 @@ def identify_type(self): def get_default_oss_endpoint(self): return "https://oss-{}.aliyuncs.com".format(self.region_id) - def list_oss_buckets(self): + def list_oss_buckets(self, prefix: str = "") -> List[SimplifiedBucketInfo]: buckets: List[SimplifiedBucketInfo] = [] service = oss2.Service( auth=oss2.ProviderAuth( @@ -166,7 +208,9 @@ def list_oss_buckets(self): marker = "" while True: - res: oss2.models.ListBucketsResult = service.list_buckets(marker=marker) + res: oss2.models.ListBucketsResult = service.list_buckets( + prefix=prefix, marker=marker + ) buckets.extend( [b for b in res.buckets if self.region_id in b.location] or [] ) @@ -177,22 +221,16 @@ def list_oss_buckets(self): return buckets - def get_bucket_info(self, bucket_name): - service = 
oss2.Service( - auth=oss2.ProviderAuth( - credentials_provider=CredentialProviderWrapper( - config=self.credential_config, - ), + def get_bucket_info(self, bucket_name) -> BucketInfo: + auth = oss2.ProviderAuth( + credentials_provider=CredentialProviderWrapper( + config=self.credential_config, ), - endpoint=self.get_default_oss_endpoint(), ) - res: oss2.models.ListBucketsResult = service.list_buckets(prefix=bucket_name) - bucket_info = next((b for b in res.buckets if b.name == bucket_name), None) - if not bucket_info: - raise ValueError( - f"Not found bucket with the specific name: bucket_name={bucket_name}" - ) - + bucket = oss2.Bucket( + auth, self.get_default_oss_endpoint(), bucket_name=bucket_name + ) + bucket_info = bucket.get_bucket_info() return bucket_info def create_oss_bucket(self, bucket_name): @@ -219,41 +257,37 @@ def get_workspace_api(self) -> WorkspaceAPI: service_name=ServiceName.PAI_WORKSPACE, credential_client=self._get_credential_client(), region_id=self.region_id, + network=self.network, ) return WorkspaceAPI( acs_client=acs_ws_client, ) - def get_default_oss_storage_uri(self, workspace_id: str): - workspace_api = self.get_workspace_api() - resp = workspace_api.list_configs( + def get_default_oss_storage_uri( + self, workspace_id: str + ) -> Tuple[Optional[str], Optional[str]]: + bucket_name, endpoint = Session.get_default_oss_storage( workspace_id=workspace_id, - config_keys=WorkspaceConfigKeys.DEFAULT_OSS_STORAGE_URI, - ) - - oss_storage_uri = next( - ( - item["ConfigValue"] - for item in resp["Configs"] - if item["ConfigKey"] == WorkspaceConfigKeys.DEFAULT_OSS_STORAGE_URI - ), - None, + cred=self._get_credential_client(), + region_id=self.region_id, + network=self.network, ) - if not oss_storage_uri: - return - - uri_obj = OssUriObj(oss_storage_uri) - return "oss://{}".format(uri_obj.bucket_name) + return "oss://{}/".format(bucket_name), endpoint - def set_default_oss_storage(self, workspace_id, bucket_info: SimplifiedBucketInfo): + def 
set_default_oss_storage( + self, workspace_id, bucket_name: str, intranet_endpoint: str + ): workspace_api = self.get_workspace_api() - oss_uri = "oss://{}.{}/".format(bucket_info.name, bucket_info.intranet_endpoint) + oss_uri = "oss://{}.{}/".format(bucket_name, intranet_endpoint) configs = {WorkspaceConfigKeys.DEFAULT_OSS_STORAGE_URI: oss_uri} workspace_api.update_configs(workspace_id, configs=configs) - def get_roles_in_workspace(self, workspace_id) -> List[str]: + def get_roles_in_workspace( + self, workspace_id, user_id: Optional[str] = None + ) -> List[str]: workspace_api = self.get_workspace_api() + user_id = user_id or self.user_id member_info = next( ( mem @@ -261,7 +295,7 @@ def get_roles_in_workspace(self, workspace_id) -> List[str]: workspace_api.list_members, workspace_id=workspace_id, ) - if mem["UserId"] == self.user_id + if mem["UserId"] == user_id ), None, ) diff --git a/pai/tracking/__init__.py b/pai/tracking/__init__.py new file mode 100644 index 0000000..529534f --- /dev/null +++ b/pai/tracking/__init__.py @@ -0,0 +1 @@ +from .lineage import LineageEntity, log_lineage diff --git a/pai/tracking/lineage.py b/pai/tracking/lineage.py new file mode 100644 index 0000000..6753237 --- /dev/null +++ b/pai/tracking/lineage.py @@ -0,0 +1,358 @@ +# Copyright 2023 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import json +from dataclasses import dataclass +from typing import List, Optional + +from Tea.exceptions import TeaException + +from pai.api.lineage import _LineageEntity +from pai.common.logging import get_logger +from pai.common.utils import ( + parse_bmcpfs_uri, + parse_cpfs_uri, + parse_local_file_uri, + parse_nas_uri, + parse_odps_uri, + parse_oss_uri, + parse_pai_dataset_uri, +) +from pai.session import ( + _get_current_region_id, + _get_dlc_job_id, + _is_running_in_dlc, + get_default_session, +) + +logger = get_logger(__name__) + +# Global default Lineage object used by the program. +_default_lineage = None + + +@dataclass +class LineageEntity: + """ + LineageEntity is a class representing lineage entities, including local file paths, datasets, OSS files, NAS files, CPFS, and MaxCompute resources. + + Attributes: + uri (str): The file URI, supporting the following types: + + - OSS address: Format: oss://.., e.g., oss://dlc-upload-test.oss-cn-hangzhou.aliyuncs.com/dataset/ + - Ordinary NAS: Format: nas://./subpath/to/dir/, where represents the NAS file system ID. E.g., nas://fsId-mountTarget.cn-hangzhou.nas.aliyuncs.com/nas/mountTarget/ + - Extreme NAS: Format: nas://./subpath/to/dir/, where represents the NAS file system ID. E.g., nas://007636fd-gfyy.cn-hangzhou.extreme.nas.aliyuncs.com/mnt/foo/ + - CPFS: + - CPFS1.0 Format: cpfs://./subpath/to/dir/, where is an 8-character ASCII string representing the CPFS file system ID. + - CPFS2.0 Format: cpfs://.//, where is a 16-character ASCII string representing the CPFS file system ID, and is the protocol service ID. E.g., cpfs://cpfs-00f4b992044a71be.cn-hangzhou/ptc-008727a69e07d3cf/exp-00d695a1b9f6c926/ + - BMCPFS: Format: bmcpfs://-., where is a 16-character ASCII string representing the CPFS file system ID. E.g.,bmcpfs://cpfs-291070fd9529c747-000001.cn-wulanchabu.cpfs.aliyuncs.com/ + - Local mounted file path: Format: file://, e.g., file:///mnt/dataset/train. 
+ - PAI dataset: Format: pai://datasets//, where is the PAI dataset ID, and is the PAI dataset version. E.g., pai://datasets/d-f0mniq7j4cgk2x2rrn/v1 + - MaxCompute table: Format: odps:///[schema]/tables/, where is the MaxCompute project name, is the MaxCompute table schema, optional. (see: https://help.aliyun.com/zh/maxcompute/user-guide/schemas?spm=a2c4g.11186623.0.i64), and is the MaxCompute table name. E.g., odps://project_mc/tables/flow_model_label_table_v1 + resource_type (str, optional): The resource type, default as "dataset". Users can customize this. Possible values include: + + - "dataset": Dataset. + - "model": Model. + - User-defined types. + resource_use (str, optional): The resource usage, default as "train". Users can customize this. + + - For "dataset" type, possible values include: + - "train": Training data. + - "validation": Validation data. + - User-defined types. + - For "model" type, possible values include: + - "base": Base model. + - "extension": Extended model. + - User-defined usages. 
+ + """ + + # URI (Uniform Resource Identifier) + uri: str + # The resource type, default as "dataset" + resource_type: Optional[str] = "dataset" + # The resource usage, default as "train" + resource_use: Optional[str] = "train" + + +@dataclass +class _NasDatasourceAttributes: + file_system_id: str + path: str + + +@dataclass +class _PvcDatasourceAttributes: + cluster_id: str + name_space: str + pvc_name: str + path: str + pvc_type: str + + +@dataclass +class _OssDatasourceAttributes: + uri: str + + +def _read_metadata_config_in_dlc(): + try: + with open("/var/metadata/config.json", "r") as file: + return json.load(file) + except (json.JSONDecodeError, FileNotFoundError) as e: + logger.warning("Error parsing data source JSON or file not found: %s", e) + return None + + +def _get_datasource_attributes(source, datasource_type): + if datasource_type == "nas" or datasource_type == "cpfs": + return _NasDatasourceAttributes( + file_system_id=source.get("FileSystemId"), path=source.get("Path") + ) + if datasource_type == "pvc": + return _PvcDatasourceAttributes( + pvc_type=source.get("PvcType"), + pvc_name=source.get("PvcName"), + path=source.get("Path"), + cluster_id=source.get("ClusterId"), + name_space=source.get("NameSpace"), + ) + if datasource_type == "oss": + return _OssDatasourceAttributes(uri=source.get("Uri")) + return None + + +def _find_best_match_source(config, mount_path): + best_match = "" + best_details = {} + + for source in config.get("DATA_SOURCES", []): + datasource_type = source.get("DataSourceType") + mount_path_in_source = source.get("MountPath", "").rstrip("/") + + if mount_path.startswith(mount_path_in_source) and len( + mount_path_in_source + ) > len(best_match): + best_match = mount_path_in_source + best_details = { + "datasource_type": datasource_type, + "datasource_attributes": _get_datasource_attributes( + source, datasource_type + ), + } + + return best_match, best_details + + +def _find_datasource_by_mount_path(mount_path: str): + config = 
_read_metadata_config_in_dlc() + if config is None: + return None, None, None, None + + best_match, best_details = _find_best_match_source(config, mount_path) + if best_match: + region_id = config.get("DLC_REGION_ID") + remaining_path = mount_path[len(best_match) :].lstrip("/") + return ( + ( + f"{best_details['datasource_attributes'].uri.rstrip('/') + '/'}{remaining_path}" + if best_details["datasource_type"] == "oss" + else None + ), + region_id, + ( + best_details["datasource_attributes"] + if best_details["datasource_type"] == "nas" + or best_details["datasource_type"] == "cpfs" + else None + ), + ( + best_details["datasource_attributes"] + if best_details["datasource_type"] == "pvc" + else None + ), + ) + return None, None, None, None + + +def _fill_lineage_entity(entity: LineageEntity) -> _LineageEntity: + input_uri = entity.uri + local_file_path = parse_local_file_uri(input_uri) + if local_file_path: + ( + uri, + region_id, + nas_entity_attributes, + pvc_entity_attributes, + ) = _find_datasource_by_mount_path(local_file_path) + if nas_entity_attributes: + _entity = _LineageEntity() + _entity.EntityType = "nas-file" + _entity.Attributes = { + "Uri": uri, + "ResourceType": entity.resource_type, + "ResourceUse": entity.resource_use, + "RegionId": region_id, + "FileSystemId": nas_entity_attributes.file_system_id, + "Path": nas_entity_attributes.path, + } + return _entity + if pvc_entity_attributes: + _entity = _LineageEntity() + _entity.EntityType = "pvc-file" + _entity.Attributes = { + "ResourceType": entity.resource_type, + "ResourceUse": entity.resource_use, + "RegionId": region_id, + "ClusterId": pvc_entity_attributes.cluster_id, + "NameSpace": pvc_entity_attributes.name_space, + "PvcName": pvc_entity_attributes.pvc_name, + "Path": pvc_entity_attributes.path, + "PvcType": pvc_entity_attributes.pvc_type, + } + return _entity + if uri: + input_uri = uri + else: + logger.warning(f"can not find uri by mount path: {local_file_path}") + parsed_result = 
parse_oss_uri(input_uri) + if parsed_result: + bucket_name, region_id, path = parsed_result + _entity = _LineageEntity() + _entity.EntityType = "oss-file" + _entity.Attributes = { + "Bucket": bucket_name, + "Path": path, + "ResourceType": entity.resource_type, + "ResourceUse": entity.resource_use, + "RegionId": region_id, + } + return _entity + parsed_result = parse_pai_dataset_uri(input_uri) + if parsed_result: + dataset_id, dataset_version = parsed_result + try: + dataset_detail = get_default_session().dataset_api.get(dataset_id) + if dataset_detail: + _entity = _LineageEntity() + if dataset_detail.get("Provider") == "pai": + _entity.QualifiedName = ( + f"pai-dataset.{dataset_id}_{dataset_version}".format( + dataset_id=dataset_id, dataset_version=dataset_version + ) + ) + _entity.Name = dataset_detail["Name"] + _entity.Attributes = { + "ResourceUse": entity.resource_use, + "Provider": "pai", + } + return _entity + else: + _entity.QualifiedName = ( + f"pai-dataset.{dataset_id}_{dataset_version}".format( + dataset_id=dataset_id, dataset_version=dataset_version + ) + ) + _entity.Name = dataset_detail["Name"] + _entity.Attributes = { + "ResourceType": entity.resource_type, + "ResourceUse": entity.resource_use, + "RegionId": _get_current_region_id(), + "Uri": dataset_detail["Uri"], + "VersionName": dataset_version, + } + return _entity + except TeaException as e: + logger.warning( + f"can not find dataset by dataset_id: {dataset_id}, {str(e)}" + ) + parsed_result = parse_odps_uri(input_uri) + if parsed_result: + project_name, schema, table_name = parsed_result + _entity = _LineageEntity() + if schema: + _entity.QualifiedName = ( + f"maxcompute-table.{project_name}.{schema}.{table_name}".format( + project_name=project_name, schema=schema, table_name=table_name + ) + ) + else: + _entity.QualifiedName = ( + f"maxcompute-table.{project_name}.{table_name}".format( + project_name=project_name, table_name=table_name + ) + ) + _entity.Attributes = { + "ResourceType": 
entity.resource_type, + "ResourceUse": entity.resource_use, + } + return _entity + return None + + +class Lineage(object): + def __init__(self): + super() + + def log_lineage( + self, input_entities: List[LineageEntity], output_entities: List[LineageEntity] + ): + """ + Recommended to use the log_lineage(input_entities: List[LineageEntity], output_entities: List[LineageEntity]) + function directly. + """ + session = get_default_session() + if _is_running_in_dlc(): + _input_entities = [] + _output_entities = [] + for input_entity in input_entities: + entity = _fill_lineage_entity(input_entity) + if entity: + _input_entities.append(entity) + for output_entity in output_entities: + entity = _fill_lineage_entity(output_entity) + if entity: + _output_entities.append(entity) + if len(_input_entities) == 0 or len(_output_entities) == 0: + logger.warning("input_entities or output_entities is empty, ignore.") + else: + session.lineage_api.log_lineage( + _input_entities, + _output_entities, + _get_dlc_job_id(), + session.workspace_id, + ) + logger.debug(_input_entities) + logger.debug(_output_entities) + logger.debug(_get_dlc_job_id()) + logger.debug(session.workspace_id) + else: + logger.warning("log_lineage is not supported in non-DLC environment.") + + +def log_lineage( + input_entities: List[LineageEntity], output_entities: List[LineageEntity] +): + """ + Records the lineage relationships generated during model training/data processing, etc. Supported execution + environments include: DLC. If running in other environments, lineage recording will be ignored. + + Args: + input_entities (List[LineageEntity]): A list of input entities, each representing the source of data in DLC tasks. + output_entities (List[LineageEntity]): A list of output entities, each representing the output of data in DLC tasks. 
+ """ + global _default_lineage + if not _default_lineage: + _default_lineage = Lineage() + + _default_lineage.log_lineage(input_entities, output_entities) diff --git a/pai/schema/__init__.py b/pai/version.py similarity index 96% rename from pai/schema/__init__.py rename to pai/version.py index b42e873..e87e0f9 100644 --- a/pai/schema/__init__.py +++ b/pai/version.py @@ -11,3 +11,5 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +VERSION = "0.4.12.dev0" diff --git a/pyproject.toml b/pyproject.toml index 2f8fb69..a3e74c4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,7 +25,7 @@ src_paths = ["pai", "tests"] #known_first_party = ["pai", "tests"] [tool.pytest.ini_options] -timeout = 300 +timeout = 600 [doc8] max-line-length=88 diff --git a/requirements/doc-requirements.txt b/requirements/doc-requirements.txt index d7ba28e..a985769 100644 --- a/requirements/doc-requirements.txt +++ b/requirements/doc-requirements.txt @@ -1,4 +1,4 @@ -sphinx +sphinx>=5.0.0 sphinx-rtd-theme sphinx-copybutton sphinx_book_theme diff --git a/requirements/requirements.txt b/requirements/requirements.txt index f58f634..f5bc171 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -1,13 +1,12 @@ aliyun-python-sdk-core>=2.13.25 alibabacloud_sts20150401 importlib-metadata; python_version < "3.8" -numpy>=1.16.0 +numpy>=1.16.0, <2 oss2>=2.8.0 pyodps>=0.11.0 pyyaml>=5.3.1 six>=1.15.0 -marshmallow -marshmallow-oneofschema>=3.0.1 +pydantic>=2.0.1 eas_prediction>=0.20 alibabacloud_tea_util>=0.3.6, <1.0.0, !=0.3.9 alibabacloud_tea_openapi>=0.3.3, <1.0.0 diff --git a/requirements/test-requirements.txt b/requirements/test-requirements.txt index 6af780b..5693026 100644 --- a/requirements/test-requirements.txt +++ b/requirements/test-requirements.txt @@ -6,3 +6,4 @@ mock>=2.0.0 scikit-learn pandas docker>=4.4.0 +openai diff --git 
a/setup.py b/setup.py index 520c7df..8df16cc 100644 --- a/setup.py +++ b/setup.py @@ -7,6 +7,12 @@ pkg_root = os.path.dirname(os.path.abspath(__file__)) REQUIREMENTS_FILE = "requirements/requirements.txt" +PACKAGE_NAME = os.getenv("PACKAGE_NAME", "pai") + +version_data = {} +with open(os.path.join(pkg_root, "pai/version.py")) as fp: + exec(fp.read(), version_data) +version = version_data["VERSION"] def read_requirements(): @@ -20,9 +26,9 @@ def read_requirements(): long_description = f.read() setup( - name="alipai", - python_requires=">=3.6", - use_scm_version=True, + name=PACKAGE_NAME, + python_requires=">=3.8", + version=version, setup_requires=["setuptools_scm"], description="Alibaba Cloud PAI Python SDK", long_description=long_description, diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index 5c8c969..d0f4de7 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -16,9 +16,7 @@ import logging import os -import time import unittest -from functools import wraps import oss2 from odps import ODPS @@ -136,27 +134,6 @@ def setUpClass(cls): if cls.default_session.is_inner: PublicMaxComputeTableDataSet.set_dataset_project("pai_inner_project") cls.odps_client = cls._init_maxc_client() - cls.patch_model_deploy() - - @classmethod - def patch_model_deploy(cls): - """Hack for model deploy wait for service ready.""" - from pai.model import ModelBase - - def deco(f): - @wraps(f) - def _(*args, **kwargs): - wait = kwargs.get("wait") - res = f(*args, **kwargs) - # wait is True which means deploy method should wait until the - # prediction service is 'really' ready. 
- if wait: - time.sleep(15) - return res - - return _ - - ModelBase.deploy = deco(ModelBase.deploy) @classmethod def tearDownClass(cls): @@ -245,3 +222,12 @@ def upload_file(cls, oss_bucket, location, file): bucket_name=oss_bucket.bucket_name, key=key, ) + + @classmethod + def get_oss_uri(cls, oss_bucket, location, file_name=""): + key = location + file_name + + return "oss://{bucket_name}/{key}".format( + bucket_name=oss_bucket.bucket_name, + key=key, + ) diff --git a/tests/integration/test.ini.template b/tests/integration/test.ini.template index 6b88738..a8a4953 100644 --- a/tests/integration/test.ini.template +++ b/tests/integration/test.ini.template @@ -3,6 +3,7 @@ access_key_id= access_key_secret= region_id= +workspace_id= # 算法使用的MaxCompute项目 [odps] diff --git a/tests/integration/test_estimator.py b/tests/integration/test_estimator.py index 2aab717..32ae661 100644 --- a/tests/integration/test_estimator.py +++ b/tests/integration/test_estimator.py @@ -13,13 +13,18 @@ # limitations under the License. 
import os +import posixpath +import re from unittest import skipUnless import pytest from pai.common.oss_utils import upload +from pai.common.utils import random_str from pai.estimator import AlgorithmEstimator, Estimator +from pai.experiment import Experiment from pai.image import retrieve +from pai.job._training_job import ExperimentConfig, ResourceType, SpotSpec from pai.session import get_default_session from tests.integration import BaseIntegTestCase from tests.integration.utils import t_context @@ -67,11 +72,57 @@ def test_xgb_train(self): "test": self.breast_cancer_test_data_uri, }, ) - model_path = os.path.join(os.path.join(est.model_data(), "model.json")) + self.assertTrue(self.is_oss_object_exists(model_path)) + + def test_output_config(self): + xgb_image_uri = retrieve("xgboost", framework_version="latest").image_uri + sess = get_default_session() + + est = Estimator( + image_uri=xgb_image_uri, + source_dir=os.path.join(test_data_dir, "xgb_train"), + command="python train.py", + hyperparameters={ + "n_estimators": 50, + "objective": "binary:logistic", + "max_depth": 5, + "eval_metric": "auc", + }, + instance_type="ecs.c6.large", + ) + test_output_path = ( + f"oss://{sess.oss_bucket.bucket_name}/sdk-test/test-output/{random_str(6)}/" + ) + est.fit( + inputs={ + "train": self.breast_cancer_train_data_uri, + "test": self.breast_cancer_test_data_uri, + }, + outputs={ + "model": test_output_path, + }, + ) + self.assertEqual(test_output_path, est.model_data()) + model_path = os.path.join(os.path.join(test_output_path, "model.json")) self.assertTrue(self.is_oss_object_exists(model_path)) + @skipUnless(t_context.support_spot_instance, "Skip spot instance test") + def test_use_spot_instance(self): + xgb_image_uri = retrieve("xgboost", framework_version="latest").image_uri + est = Estimator( + command="echo helloworld", + instance_type="ml.gu7ef.8xlarge-gu100", + image_uri=xgb_image_uri, + spot_spec=SpotSpec( + spot_strategy="SpotWithPriceLimit", + 
spot_discount_limit=0.5, +            ),
+            resource_type=ResourceType.Lingjun,
+        )
+        est.fit()
+
     def test_torch_run(self):
         torch_image_uri = retrieve("pytorch", framework_version="1.12").image_uri
         est = Estimator(
@@ -93,6 +144,35 @@ def test_torch_run(self):
         self.assertIsNotNone(tb.app_uri)
         tb.delete()
 
+    def test_checkpoints(self):
+        sess = get_default_session()
+        torch_image_uri = retrieve("pytorch", framework_version="1.12").image_uri
+        filename = "output.txt"
+        command = (
+            f"echo helloworld > /ml/output/checkpoints/{filename} && echo 'helloworld'"
+        )
+        checkpoint_path = f"oss://{sess.oss_bucket.bucket_name}/sdk-test/test-checkpoints/{random_str(6)}/"
+
+        est = Estimator(
+            image_uri=torch_image_uri,
+            command=command,
+            instance_type="ecs.c6.large",
+            base_job_name="torch_run_",
+            checkpoints_path=checkpoint_path,
+        )
+
+        est.fit(
+            inputs={
+                "training": self.breast_cancer_train_data_uri,
+                "test": self.breast_cancer_test_data_uri,
+            },
+            wait=True,
+        )
+        self.assertEqual(checkpoint_path, est.checkpoints_data())
+        self.assertTrue(
+            self.is_oss_object_exists(posixpath.join(checkpoint_path, filename))
+        )
+
     def test_max_compute_input(self):
         image_uri = retrieve("xgboost", framework_version="latest").image_uri
         est = Estimator(
@@ -222,11 +302,7 @@ def test_remote_data(self):
 )
 class TestEstimatorLocalRunGPU(BaseIntegTestCase):
     def test(self):
-        image_uri = retrieve(
-            "pytorch",
-            "1.12",
-            accelerator_type="GPU",
-        ).image_uri
+        image_uri = retrieve("xgboost", framework_version="latest").image_uri
 
         est = Estimator(
             image_uri=image_uri,
@@ -235,3 +311,67 @@ def test(self):
             instance_type="local_gpu",
         )
         est.fit()
+
+
+class TestTrainWithExperimentConfig(BaseIntegTestCase):
+    def setUp(self):
+        exp_name = f"sdk_estimator_test_{random_str(6)}"
+        self.experiment = Experiment.create(
+            artifact_uri="oss://{}/sdktest/test_experiment/sdk_estimator_test_experiment/".format(
+                self.default_session.oss_bucket.bucket_name
+            ),
+            name=exp_name,
+        )
+        self.image_uri = retrieve(
+            
"pytorch", + "1.12", + accelerator_type="GPU", + ).image_uri + self.command = "python train.py" + self.source_dir = os.path.join(test_data_dir, "experiment_train") + self.instance_type = "ecs.c6.large" + tensorboard_data_escaped = re.escape(self.experiment.tensorboard_data()) + self.tensorboard_path_regex_pattern = f"^{tensorboard_data_escaped}[a-z0-9]+/$" + + def test_train_with_experiment_config(self): + est = Estimator( + image_uri=self.image_uri, + command=self.command, + source_dir=self.source_dir, + instance_type=self.instance_type, + experiment_config=ExperimentConfig( + experiment_id=self.experiment.experiment_id, + ), + ) + est.fit() + + tensorboard_path = est.tensorboard_data() + self.assertRegex(tensorboard_path, self.tensorboard_path_regex_pattern) + artifact_uri_escaped = re.escape(self.experiment.artifact_uri) + model_path_regex_pattern = f"^{artifact_uri_escaped}[a-z0-9]+/model/$" + self.assertRegex(est.model_data(), model_path_regex_pattern) + + def test_train_with_output_and_experiment_config(self): + output_path = "oss://{}/sdktest/test_experiment/output_config_path/".format( + self.default_session.oss_bucket.bucket_name + ) + est = Estimator( + image_uri=self.image_uri, + command=self.command, + source_dir=self.source_dir, + instance_type=self.instance_type, + output_path=output_path, + experiment_config=ExperimentConfig( + experiment_id=self.experiment.experiment_id, + ), + ) + est.fit() + + output_escaped = re.escape(output_path) + model_path_regex_pattern = f"^{output_escaped}[a-z0-9_]+/model/$" + self.assertRegex(est.model_data(), model_path_regex_pattern) + tensorboard_path = est.tensorboard_data() + self.assertRegex(tensorboard_path, self.tensorboard_path_regex_pattern) + + def tearDown(self): + self.experiment.delete() diff --git a/tests/integration/test_experiment.py b/tests/integration/test_experiment.py new file mode 100644 index 0000000..d204513 --- /dev/null +++ b/tests/integration/test_experiment.py @@ -0,0 +1,91 @@ +# Copyright 2023 
Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from pai.common.utils import random_str +from pai.experiment import Experiment +from pai.tensorboard import TensorBoardStatus +from tests.integration import BaseIntegTestCase + +tensorboard_path_suffix = "tensorboard/" + + +class TestExperiment(BaseIntegTestCase): + def setUp(self): + super(TestExperiment, self).setUp() + self.artifact_uri = "oss://{}/sdktest/test_experiment/".format( + self.default_session.oss_bucket.bucket_name + ) + + def test_create(self): + # Init test data + exp_name = "test_experiment_" + random_str(10) + # Test create + self.experiment = Experiment.create( + artifact_uri=self.artifact_uri, + name=exp_name, + ) + self.assertEqual(self.experiment.name, exp_name) + expected_tb_path = self.artifact_uri + tensorboard_path_suffix + self.assertEqual(self.experiment.tensorboard_data(), expected_tb_path) + + def test_update(self): + exp_name = "test_experiment_" + random_str(10) + self.experiment = Experiment.create( + artifact_uri=self.artifact_uri, + name=exp_name, + ) + # Test update + exp_name = exp_name + "_updated" + self.experiment.update(name=exp_name) + self.assertEqual(self.experiment.name, exp_name) + + def test_list(self): + exp_name = "test_experiment_" + random_str(10) + self.experiment = Experiment.create( + artifact_uri=self.artifact_uri, + name=exp_name, + ) + # Test list + experiment_iterator = Experiment.list(name=exp_name) + experiment_names = 
[e.name for e in experiment_iterator] + self.assertEqual(len(experiment_names), 1) + self.assertEqual(experiment_names[0], exp_name) + + def test_get(self): + exp_name = "test_experiment_" + random_str(10) + self.experiment = Experiment.create( + artifact_uri=self.artifact_uri, + name=exp_name, + ) + # Test get + exp1 = Experiment.get(experiment_id=self.experiment.experiment_id) + self.assertEqual(self.experiment.name, exp1.name) + self.assertEqual(self.experiment.experiment_id, exp1.experiment_id) + self.assertEqual(self.experiment.tensorboard_data(), exp1.tensorboard_data()) + + def test_tensorboard(self): + exp_name = "test_experiment_" + random_str(10) + self.experiment = Experiment.create( + artifact_uri=self.artifact_uri, + name=exp_name, + ) + # Test tensorboard + tb = self.experiment.tensorboard() + self.assertIsNotNone(tb.app_uri) + self.assertEqual(tb.status, TensorBoardStatus.Running) + tb.delete() + + def tearDown(self): + if hasattr(self, "experiment") and self.experiment: + self.experiment.delete() diff --git a/tests/integration/test_lineage.py b/tests/integration/test_lineage.py new file mode 100644 index 0000000..9a71ab2 --- /dev/null +++ b/tests/integration/test_lineage.py @@ -0,0 +1,300 @@ +# Copyright 2023 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import logging +from unittest.mock import patch + +from pai.common.logging import get_logger +from pai.tracking import LineageEntity, log_lineage +from tests.integration import BaseIntegTestCase +from tests.unit.utils import mock_env + + +class TestLineage(BaseIntegTestCase): + def test_log_lineage_run_in_non_dlc_env(self): + with self.assertLogs( + logger=get_logger("pai.tracking.lineage"), level=logging.WARNING + ) as captured: + log_lineage( + input_entities=[ + LineageEntity( + uri="pai://datasets/d-f0mniq7j4cgk2x2rrn/v1", + resource_type="dataset", + resource_use="train", + ), + ], + output_entities=[ + LineageEntity( + uri="file:///mnt/model/", + resource_type="model", + resource_use="extension", + ) + ], + ) + self.assertIn( + "log_lineage is not supported in non-DLC environment.", + captured.output[0], + ) + + @mock_env(DLC_JOB_ID="d123456") + @mock_env(REGION="cn-hangzhou") + def test_log_lineage_with_no_datasources_config_file_in_dlc(self): + with self.assertLogs( + logger=get_logger("pai.tracking.lineage"), level=logging.WARNING + ) as captured: + log_lineage( + input_entities=[ + LineageEntity( + uri="file:///mnt/input/dataset", + resource_type="model", + resource_use="extension", + ) + ], + output_entities=[ + LineageEntity( + uri="file:///mnt/output/model/model.pth", + resource_type="model", + resource_use="extension", + ) + ], + ) + self.assertIn( + "WARNING:pai.tracking.lineage:Error parsing data source JSON or file not found: [Errno 2] No such file or directory: '/var/metadata/config.json'", + captured.output[0], + ) + + @mock_env(DLC_JOB_ID="d123456") + @mock_env(REGION="cn-hangzhou") + def test_log_lineage_with_valid_mount_path_in_dlc(self): + with self.assertLogs( + logger=get_logger("pai.tracking.lineage"), level=logging.DEBUG + ) as captured: + mock_config = { + "DATA_SOURCES": [ + { + "DataSourceType": "pvc", + "ClusterId": "cbb8f09f999534c5187532a19fe6a3bba", + "MountPath": "/mnt/input/pvc", + "NameSpace": "quota2md3bak6ovi", + "Path": 
"/nas", + "PvcName": "nas-pvc", + "PvcType": "hostPath", + }, + { + "DataSourceType": "nas", + "DataSourceId": "data16mjfuvf6v3a", + "FileSystemId": "2964e349a2d", + "MountPath": "/mnt/input/nas", + "Path": "/", + "Uri": "", + "Version": "", + }, + { + "DataSourceId": "data824oavjsogd7", + "FileSystemId": "", + "MountPath": "/mnt/output/model", + "Path": "oss://dlc-upload-test/model/", + "Uri": "oss://dlc-upload-test.oss-cn-hangzhou-internal.aliyuncs.com/model/", + "Version": "", + "DataSourceType": "oss", + }, + ], + "DLC_JOB_ID": "dlcrmzton6fvujw5", + "DLC_REGION_ID": "eflops", + "DLC_USER_ID": "", + "WORK_SPACE_ID": "wspdfsp20olq9l9m", + } + with patch( + "pai.tracking.lineage._read_metadata_config_in_dlc", + return_value=mock_config, + ): + log_lineage( + input_entities=[ + LineageEntity( + uri="file:///mnt/input/pvc/dataset", + resource_type="model", + resource_use="extension", + ), + LineageEntity( + uri="file:///mnt/input/nas/dataset", + resource_type="dataset", + resource_use="train", + ), + ], + output_entities=[ + LineageEntity( + uri="file:///mnt/output/model/model.pth", + resource_type="model", + resource_use="extension", + ) + ], + ) + self.assertIn( + "DEBUG:pai.tracking.lineage:[_LineageEntity(Attributes={'ResourceType': 'model', 'ResourceUse': 'extension', 'RegionId': 'eflops', 'ClusterId': 'cbb8f09f999534c5187532a19fe6a3bba', 'NameSpace': 'quota2md3bak6ovi', 'PvcName': 'nas-pvc', 'Path': '/nas', 'PvcType': 'hostPath'}, EntityType='pvc-file', Name=None, QualifiedName=None), _LineageEntity(Attributes={'Uri': None, 'ResourceType': 'dataset', 'ResourceUse': 'train', 'RegionId': 'eflops', 'FileSystemId': '2964e349a2d', 'Path': '/'}, EntityType='nas-file', Name=None, QualifiedName=None)]", + captured.output[0], + ) + self.assertIn( + "DEBUG:pai.tracking.lineage:[_LineageEntity(Attributes={'Bucket': 'dlc-upload-test', 'Path': 'model/model.pth', 'ResourceType': 'model', 'ResourceUse': 'extension', 'RegionId': 'cn-hangzhou'}, EntityType='oss-file', 
Name=None, QualifiedName=None)]", + captured.output[1], + ) + + @mock_env(DLC_JOB_ID="d123456") + @mock_env(REGION="cn-hangzhou") + def test_log_lineage_with_valid_entities_in_dlc(self): + with self.assertLogs( + logger=get_logger("pai.tracking.lineage"), level=logging.DEBUG + ) as captured: + with patch( + "pai.api.api_container.ResourceAPIsContainerMixin.lineage_api", + return_value={}, + ): + log_lineage( + input_entities=[ + LineageEntity( + uri="oss://test-bucket.oss-cn-shanghai.aliyuncs.com/models/ALBERTv2-Chinese-NewsBase.pth", + resource_type="model", + resource_use="base", + ), + LineageEntity( + uri="pai://datasets/d-jipftzxinc7nm1z0uh/v1", + resource_type="dataset", + resource_use="train", + ), + LineageEntity( + uri="odps://project_mc/tables/flow_model_label_table_v1", + resource_type="dataset", + resource_use="test", + ), + ], + output_entities=[ + LineageEntity( + uri="oss://hangzhoutest01.oss-cn-hangzhou-internal.aliyuncs.com/models/model.pth", + resource_type="model", + resource_use="extension", + ) + ], + ) + self.maxDiff = None + self.assertEquals( + "DEBUG:pai.tracking.lineage:[_LineageEntity(Attributes={'Bucket': 'test-bucket', 'Path': 'models/ALBERTv2-Chinese-NewsBase.pth', 'ResourceType': 'model', 'ResourceUse': 'base', 'RegionId': 'cn-shanghai'}, EntityType='oss-file', Name=None, QualifiedName=None), _LineageEntity(Attributes={'ResourceUse': 'train', 'Provider': 'pai'}, EntityType=None, Name='Aishell_1_subset_qwen', QualifiedName='pai-dataset.d-jipftzxinc7nm1z0uh_v1'), _LineageEntity(Attributes={'ResourceType': 'dataset', 'ResourceUse': 'test'}, EntityType=None, Name=None, QualifiedName='maxcompute-table.project_mc.flow_model_label_table_v1')]", + captured.output[0], + ) + self.assertEquals( + "DEBUG:pai.tracking.lineage:[_LineageEntity(Attributes={'Bucket': 'hangzhoutest01', 'Path': 'models/model.pth', 'ResourceType': 'model', 'ResourceUse': 'extension', 'RegionId': 'cn-hangzhou'}, EntityType='oss-file', Name=None, QualifiedName=None)]", + 
captured.output[1], + ) + self.assertEquals("DEBUG:pai.tracking.lineage:d123456", captured.output[2]) + + @mock_env(DLC_JOB_ID="d123456") + @mock_env(REGION="cn-hangzhou") + def test_log_lineage_with_invalid_format_input_entities_in_dlc(self): + with self.assertLogs( + logger=get_logger("pai.tracking.lineage"), level=logging.WARNING + ) as captured: + log_lineage( + input_entities=[ + LineageEntity( + uri="oss://test-bucket/models/ALBERTv2-Chinese-NewsBase.pth", + resource_type="model", + resource_use="base", + ), + LineageEntity( + uri="pai://datasets/", + resource_type="dataset", + resource_use="train", + ), + LineageEntity( + uri="odps://project_mc/flow_model_label_table_v1", + resource_type="dataset", + resource_use="test", + ), + ], + output_entities=[ + LineageEntity( + uri="oss://test-bucket.oss-cn-shanghai.aliyuncs.com/model/model.pth", + resource_type="model", + resource_use="extension", + ) + ], + ) + self.assertIn( + "input_entities or output_entities is empty, ignore.", + captured.output[0], + ) + + @mock_env(DLC_JOB_ID="d123456") + @mock_env(REGION="cn-hangzhou") + def test_log_lineage_with_invalid_format_output_entities_in_dlc(self): + with self.assertLogs( + logger=get_logger("pai.tracking.lineage"), level=logging.WARNING + ) as captured: + log_lineage( + input_entities=[ + LineageEntity( + uri="oss://test-bucket.oss-cn-shanghai.aliyuncs.com/models/ALBERTv2-Chinese-NewsBase.pth", + resource_type="model", + resource_use="base", + ), + LineageEntity( + uri="pai://datasets/d-jipftzxinc7nm1z0uh/v1", + resource_type="dataset", + resource_use="train", + ), + LineageEntity( + uri="odps://project_mc/tables/flow_model_label_table_v1", + resource_type="dataset", + resource_use="test", + ), + ], + output_entities=[ + LineageEntity( + uri="oss://test-bucket/models/ALBERTv2-Chinese-NewsBase.pth", + resource_type="model", + resource_use="extension", + ), + LineageEntity( + uri="pai://datasets/", + resource_type="dataset", + resource_use="val", + ), + 
LineageEntity( + uri="odps://project_mc/flow_model_label_table_v1", + resource_type="dataset", + resource_use="test", + ), + ], + ) + self.assertIn( + "input_entities or output_entities is empty, ignore.", + captured.output[0], + ) + + @mock_env(DLC_JOB_ID="d123456") + @mock_env(REGION="cn-hangzhou") + def test_log_lineage_with_empty_input_output_entities_in_dlc(self): + with self.assertLogs( + logger=get_logger("pai.tracking.lineage"), level=logging.WARNING + ) as captured: + log_lineage([], []) + self.assertIn( + "input_entities or output_entities is empty, ignore.", + captured.output[0], + ) + log_lineage( + input_entities=[LineageEntity(uri="")], + output_entities=[LineageEntity(uri="")], + ) + self.assertIn( + "input_entities or output_entities is empty, ignore.", + captured.output[1], + ) diff --git a/tests/integration/test_model/__init__.py b/tests/integration/test_model/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/test_model.py b/tests/integration/test_model/test_model.py similarity index 90% rename from tests/integration/test_model.py rename to tests/integration/test_model/test_model.py index 42898ca..d6e3a5d 100644 --- a/tests/integration/test_model.py +++ b/tests/integration/test_model/test_model.py @@ -29,8 +29,10 @@ from pai.model import ( InferenceSpec, Model, + NodeStorageConfig, RegisteredModel, ResourceConfig, + SharedMemoryConfig, container_serving_spec, ) from tests.integration import BaseIntegTestCase @@ -79,18 +81,23 @@ def test_container_serving(self): command="python serving.py", image_uri=image_uri, port=5000, + storage_configs=[ + SharedMemoryConfig(size_limit=1), + NodeStorageConfig(mount_path="/ml/disk/"), + ], ) + self.assertEqual(len(inference_spec.storage), 3) model = Model( inference_spec=inference_spec, model_data=os.path.join(test_data_dir, "xgb_model/model.json"), ) + predictor = model.deploy( service_name=make_eas_service_name("container_serving"), instance_type="ecs.c6.xlarge", 
serializer=NumpyBytesSerializer(), ) self.predictors.append(predictor) - # hack: wait for service ready df = pd.read_csv( os.path.join(test_data_dir, "breast_cancer_data/test.csv"), ) @@ -240,8 +247,7 @@ def tearDownClass(cls): def test_tmp_algo_rm_train(self): """Test training registered model with temporary algorithm""" m = RegisteredModel( - model_name="easynlp_pai_bert_tiny_zh", - model_version="0.1.0", + model_name="qwen1.5-0.5b-chat", model_provider="pai", ) @@ -252,9 +258,9 @@ def test_tmp_algo_rm_train(self): outputs_data = est.get_outputs_data() self.assertTrue(isinstance(outputs_data, dict)) self.assertTrue(outputs_data) - self.assertTrue(len(outputs_data) == 1) + self.assertTrue(len(outputs_data) == 2) - model_path = os.path.join(outputs_data["model"], "pytorch_model.bin") + model_path = os.path.join(outputs_data["model"], "model.safetensors") self.assertTrue(self.is_oss_object_exists(model_path)) @pytest.mark.timeout(60 * 10) @@ -267,9 +273,26 @@ def test_builtin_algo_rm_train(self): ) est = m.get_estimator() + + self.assertEqual( + est.labels.get("BaseModelUri"), + m.uri, + ) + + self.assertEqual( + est.labels.get("RootModelName"), + m.model_name, + ) + self.assertEqual( + est.labels.get("RootModelID"), + m.model_id, + ) + inputs = m.get_estimator_inputs() est.hyperparameters["max_epochs"] = 5 est.hyperparameters["warmup_epochs"] = 2 + est.hyperparameters["image_scale"] = "640,640" + est.hyperparameters["train_batch_size"] = 8 est.fit(inputs=inputs) outputs_data = est.get_outputs_data() @@ -291,8 +314,14 @@ def test_rm_deploy(self): ) p = m.deploy() - self.predictors.append(p) + + self.assertEqual(p.labels.get("RootModelID"), m.model_id) + self.assertEqual(p.labels.get("RootModelName"), m.model_name) + self.assertEqual(p.labels.get("RootModelVersion"), m.model_version) + self.assertEqual(p.labels.get("BaseModelUri"), m.uri) + self.assertEqual(p.labels.get("Task"), m.task) + self.assertEqual(p.labels.get("Domain"), m.domain) 
self.assertTrue(p.service_name) res = p.predict(["开心", "死亡"]) self.assertTrue(isinstance(res, list)) @@ -300,6 +329,24 @@ def test_rm_deploy(self): self.assertTrue(res[0]["label"] == "正向") self.assertTrue(res[1]["label"] == "负向") + @pytest.mark.timeout(60 * 10) + @skipUnless( + False, "No available model in prod environment, please run this case manually." + ) + def test_model_evaluation(self): + m = RegisteredModel( + model_name="qwen-7b-chat", + model_version="0.2.5", + model_provider="pai", + ) + self.assertIsNotNone(m.evaluation_spec) + + inputs = m.get_evaluation_inputs() + processor = m.get_eval_processor( + instance_type="ecs.c6.large", + ) + processor.run(inputs=inputs) + class TestInferenceSpec(BaseIntegTestCase): def test_mount_local_source(self): diff --git a/tests/integration/test_model/test_model_recipe.py b/tests/integration/test_model/test_model_recipe.py new file mode 100644 index 0000000..95084a5 --- /dev/null +++ b/tests/integration/test_model/test_model_recipe.py @@ -0,0 +1,156 @@ +# Copyright 2024 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os +from pathlib import Path +from unittest import skipUnless + +import pytest + +from pai.common.utils import camel_to_snake, random_str +from pai.image import retrieve +from pai.job import SpotSpec +from pai.job._training_job import ResourceType +from pai.model import ModelTrainingRecipe, RegisteredModel +from pai.model._model_recipe import ModelRecipeType +from tests.integration import BaseIntegTestCase +from tests.integration.utils import t_context +from tests.test_data import test_data_dir + + +@pytest.mark.timeout(60 * 30) +class TestModelRecipe(BaseIntegTestCase): + + _service_names = [] + + @classmethod + def tearDownClass(cls): + sess = cls.default_session + for s in cls._service_names: + try: + sess.service_api.delete(s) + except Exception as e: + print("Failed to delete service: ", e) + + def _gen_service_name(self, prefix: str = None): + prefix = prefix or "sdk_test_" + camel_to_snake(type(self).__name__) + name = "{}_{}".format(prefix, random_str(6)) + self._service_names.append(name) + return name + + def test_training_e2e(self): + model = RegisteredModel(model_name="qwen1.5-0.5b-chat", model_provider="pai") + training_recipe = model.training_recipe(method="QLoRA_LLM") + training_recipe.train() + self.assertIsNotNone(training_recipe.model_data()) + + predictor = training_recipe.deploy( + service_name=self._gen_service_name("test_recipe_e2e"), + ) + openai = predictor.openai() + resp = openai.chat.completions.create( + model="default", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the meaning of life?"}, + ], + max_tokens=100, + ) + self.assertIsNotNone(resp.choices[0].message.content) + + @skipUnless(t_context.support_spot_instance, "Skip spot instance test") + def test_spot_instance(self): + training_recipe = ModelTrainingRecipe( + model_name="qwen2-7b-instruct", + model_provider="pai", + method="Standard", + resource_type=ResourceType.Lingjun, + spot_spec=SpotSpec( + 
spot_strategy="SpotWithPriceLimit", + spot_discount_limit=0.5, + ), + instance_type="ml.gu7ef.8xlarge-gu100", + ) + train_data = os.path.join(test_data_dir, "chinese_medical/train_sampled.json") + training_recipe.train( + inputs={ + "train": train_data, + }, + ) + + def test_custom_inputs_train(self): + model = RegisteredModel(model_name="qwen1.5-0.5b-chat", model_provider="pai") + training_recipe = model.training_recipe(method="QLoRA_LLM") + self.assertTrue( + bool(training_recipe.default_inputs), + "Default inputs is empty for ModelTrainingRecipe.", + ) + + self.assertIsNotNone(training_recipe.hyperparameter_definitions) + train_data = os.path.join(test_data_dir, "chinese_medical/train_sampled.json") + training_job = training_recipe.train( + inputs={ + "train": train_data, + }, + ) + self.assertIsNotNone(training_job) + self.assertIsNotNone(training_recipe.model_data()) + predictor = training_recipe.deploy( + service_name=self._gen_service_name("test_custom"), + ) + openai = predictor.openai() + resp = openai.chat.completions.create( + model="default", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the meaning of life?"}, + ], + max_tokens=100, + ) + self.assertIsNotNone(resp.choices[0].message.content) + + def test_custom_args(self): + command = ["echo", "helloworld"] + xgb_img = retrieve("xgboost", "latest") + hps = { + "num_train_epochs": "helloworld", + } + session = self.default_session + + recipe = ModelTrainingRecipe( + model_name="qwen1.5-0.5b-chat", + model_provider="pai", + source_dir=str(Path(test_data_dir) / "xgb_train"), + command=command, + hyperparameters=hps, + image_uri=xgb_img.image_uri, + ) + job = recipe.train( + wait=False, + ) + self.assertListEqual(job.algorithm_spec.command, command) + self.assertEqual(job.algorithm_spec.image, xgb_img.image_uri) + job_hps = {hp.name: hp.value for hp in job.hyperparameters if hp.name in hps} + self.assertDictEqual(job_hps, hps) + 
self.assertEqual( + job.algorithm_spec.code_dir.location_value.bucket, + session.oss_bucket.bucket_name, + ) + + def test_compression(self): + model = RegisteredModel(model_name="qwen2-0.5b-instruct", model_provider="pai") + compress_recipe = model.model_recipe( + recipe_type=ModelRecipeType.COMPRESSION, method="Quantization:MinMax-8Bit" + ) + compress_recipe.run() + self.assertIsNotNone(compress_recipe.latest_job.output_path()) diff --git a/tests/integration/test_modelscope.py b/tests/integration/test_modelscope.py index fa357b7..95766df 100644 --- a/tests/integration/test_modelscope.py +++ b/tests/integration/test_modelscope.py @@ -30,7 +30,7 @@ def test_base(self): est = ModelScopeEstimator( command="python -c 'import modelscope; print(modelscope.__version__)'", instance_type="ecs.c6.large", - modelscope_version="1.6.1", + modelscope_version="1.12.0", base_job_name="sdk-ms-train", ) self.assertIsNotNone(est.training_image_uri()) diff --git a/tests/integration/test_predictor.py b/tests/integration/test_predictor.py index 10278fc..f7b6b5a 100644 --- a/tests/integration/test_predictor.py +++ b/tests/integration/test_predictor.py @@ -14,7 +14,6 @@ import json import os -import time import numpy as np @@ -85,8 +84,6 @@ def _init_predictor(cls): p = Predictor(service_name=service_name) p.wait_for_ready() - # hack: wait for service to be really ready - time.sleep(15) return p @classmethod @@ -235,8 +232,6 @@ def _init_predictor(cls): p = Predictor(service_name=service_name) p.wait_for_ready() - # hack: wait for service to be really ready - time.sleep(15) return p @classmethod diff --git a/tests/integration/test_processor.py b/tests/integration/test_processor.py new file mode 100644 index 0000000..1878d7c --- /dev/null +++ b/tests/integration/test_processor.py @@ -0,0 +1,92 @@ +# Copyright 2024 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from pai.common.utils import random_str +from pai.experiment import Experiment +from pai.image import retrieve +from pai.job import ExperimentConfig +from pai.processor import Processor +from pai.session import get_default_session +from tests.integration import BaseIntegTestCase +from tests.test_data import SCRIPT_DIR_PATH, test_data_dir + + +class TestProcessor(BaseIntegTestCase): + job_output_path = None + + @classmethod + def setUpClass(cls): + super(TestProcessor, cls).setUpClass() + oss_bucket = get_default_session().oss_bucket # type oss2.Bucket + + cls.breast_cancer_test_data_uri = cls.upload_file( + oss_bucket=oss_bucket, + location="sdk-test/test_data/breast_cancer_data/test/", + file=os.path.join(test_data_dir, "breast_cancer_data/test.csv"), + ) + cls.processing_output_uri = cls.get_oss_uri( + oss_bucket=oss_bucket, + location="sdk-test/output/processing/", + ) + + def test_processing_run(self): + image_uri = retrieve("pytorch", framework_version="1.12").image_uri + processor = Processor( + image_uri=image_uri, + source_dir=SCRIPT_DIR_PATH, + command="python main.py --output_path=/ml/output/flag", + instance_type="ecs.c6.large", + base_job_name="processing", + ) + + processor.run( + inputs={"test": self.breast_cancer_test_data_uri}, + outputs={"flag": self.processing_output_uri}, + ) + + success_flag = os.path.join(self.processing_output_uri, "output.txt") + + self.assertIsNotNone(self.is_oss_object_exists(success_flag)) + + def test_train_with_experiment_config(self): + exp_name = f"sdk_estimator_test_{random_str(6)}" + 
self.experiment = Experiment.create( + artifact_uri="oss://{}/sdktest/test_experiment/sdk_estimator_test_experiment/".format( + self.default_session.oss_bucket.bucket_name + ), + name=exp_name, + ) + + image_uri = retrieve("pytorch", framework_version="1.12").image_uri + processor = Processor( + image_uri=image_uri, + source_dir=SCRIPT_DIR_PATH, + command="python main.py --output_path=/ml/output/flag", + instance_type="ecs.c6.large", + base_job_name="processing", + experiment_config=ExperimentConfig( + experiment_id=self.experiment.experiment_id + ), + ) + + processor.run( + inputs={"test": self.breast_cancer_test_data_uri}, + outputs={"flag": self.processing_output_uri}, + ) + + self.assertIsNotNone(processor.latest_job) + self.assertIsNotNone(processor.latest_job.training_job_name) + self.assertIsNotNone(processor.latest_job.experiment_config) diff --git a/tests/integration/tests_pipeline/test_pipeline_build.py b/tests/integration/tests_pipeline/test_pipeline_build.py index 3079f6c..f371cc1 100644 --- a/tests/integration/tests_pipeline/test_pipeline_build.py +++ b/tests/integration/tests_pipeline/test_pipeline_build.py @@ -135,7 +135,7 @@ def test_conflict_step_names(self): }, ) - with self.assertRaisesRegexp(ValueError, "name conflict") as _: + with self.assertRaisesRegex(ValueError, "name conflict") as _: _ = Pipeline(steps=[split_step_1, split_step_2]) def test_auto_step_name(self): @@ -207,7 +207,7 @@ def test_pipeline_cycle_detect(self): ) data_source_step.after(type_transform_step) - with self.assertRaisesRegexp(ValueError, "Cycle dependency detected") as _: + with self.assertRaisesRegex(ValueError, "Cycle dependency detected") as _: _ = Pipeline( steps=[type_transform_step, data_source_step], ) diff --git a/tests/integration/utils.py b/tests/integration/utils.py index bc19f51..86be75b 100644 --- a/tests/integration/utils.py +++ b/tests/integration/utils.py @@ -17,6 +17,7 @@ import io import os import shutil +import subprocess import uuid from collections 
import namedtuple @@ -85,7 +86,16 @@ def __init__(self, pai_service_config, oss_config, maxc_config): @property def has_docker(self): - return shutil.which("docker") is not None + # Check if docker daemon is running + return ( + shutil.which("docker") is not None + and subprocess.run( + ["docker", "stats", "--no-stream"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ).returncode + == 0 + ) @property def has_gpu(self): @@ -95,9 +105,13 @@ def has_gpu(self): def is_inner(self): return self.pai_service_config.region_id == "center" + @property + def support_spot_instance(self): + return self.pai_service_config.region_id == "cn-wulanchabu" + @classmethod def _load_test_config(cls): - test_config = os.environ.get("PAI_TEST_CONFIG", "test_public.ini") + test_config = os.environ.get("PAI_TEST_CONFIG", "test.ini") cfg_parser = configparser.ConfigParser() cfg_parser.read(os.path.join(_test_root, test_config)) @@ -170,9 +184,11 @@ def make_resource_name(case_name, resource_type=None, sep="-", time_suffix=True) "sdktest", resource_type, case_name, - datetime.datetime.now().isoformat(timespec="seconds") - if time_suffix - else random_str(10), + ( + datetime.datetime.now().isoformat(timespec="seconds") + if time_suffix + else random_str(10) + ), ], ) ) diff --git a/tests/test_data/chinese_medical/train_sampled.json b/tests/test_data/chinese_medical/train_sampled.json new file mode 100644 index 0000000..11c3bf7 --- /dev/null +++ b/tests/test_data/chinese_medical/train_sampled.json @@ -0,0 +1,202 @@ +[ + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:肚子疼上不来气老打嗝不排气感觉胸闷堵得上浑身没劲儿", + "output": "根据你的情况来看主要是由於脾胃气滞致使的病症!建议你最好是用些健胃消食,调理脾胃,平时用些小米苡米粥来补益脾胃!其实得了胃胀并不可怕,患者要自己是可以战胜病魔的,只要在早期发觉的时候及时救治,缓解病情及康复的率都非常大。此外还需要建议患者多加注意休息,不能吃辛辣刺激的食物。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:如果体液或者血液干了,是不是乙肝病毒也濒死了???", + "output": "根据你的叙述,这种情况不是乙肝病毒不存在了。果有乙肝病毒,建议去医院检查下,并且救治,效果还是很好的。肝病患者可以在理解对症救治的同时,在生活中用食疗做为配置救治,在日常生活饮食中需要有恰当饮食,防止刺激性食物。建议泥去正规专业的医院进行救治。" + }, + { + "instruction": 
"你是一个肝病科医生,请根据患者的问题给出建议:我有大学同学是挺严重的肝炎,我不知道我的抗体还在不在,在和他相处的过程中应当留意些什么呢?", + "output": "留意点吧,不要和乙型肝炎病人及乙肝病毒携带者共用毛巾、牙膏、被褥等,以免生活接触性传染。其实得了肝炎并不可怕,患者要认为自己是可以战胜病魔的,只要在早期发觉的时候及时救治,纾解病情及康复的几率都非常大。身体如果有不适的地方建议尽快到正规的医院进行检查救治。" + }, + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:我有糜烂性胃炎,胃底易发息肉,四肢无力,前些日子吃了中药,也没转好,我得怎么办?", + "output": "胃病需要有慢慢调养,不要着急!建议你平时压制饮食,不要吃辛辣打击食物,多吃温热食物,不要着凉,不定期的复检!胃炎病情很严重,建议患者马上实行救治,期望患者可以根据医生的意见对症救治。同时看重饮食问题,三餐规律,防止辛辣打击食物,以免放柔病情。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我去医院仔细检查说我高血压,这种情况比较近一直都在注意身体方面问题。高血压钾可不可以补充呢?", + "output": "您好,如果患高血压,一般需要有根据病情来口服降压药物,如果同时有高钾血症,就注意不要再专心补氯化钾。如果再次出现了低钾血症,缺钾的情况下是可以补充的,并绝不会影响血压的治疗,一般需要有注意是不是口服的力量药物或者有保钾利尿药物的口服。祝您身体健康。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:怀上三个月查出来乙肝小三阳,病毒量24500现在7个半月,要不要采用宫内截断", + "output": "乙肝是一种常见的病毒性传染病,是机体遭到乙肝病毒传染引来的根据你讲述的情况,这个时间不需要做截断救治,宫内传染发生率不低于5%,可以在临产前做截断救治其实得了乙肝并不可怕,患者要认为自己是可以战胜病魔的,只要在早期发觉的时候及时救治,压制病情及康复的几率都非常大。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:2001年检查患了乙肝大三阳,有救治过。现在也没好。现在人很瘦,脸色也不好,黄黄的,和这个病友关吗?", + "output": "那要看你的肝功,彩超还正常,如果一直正常,可以暂时不救治,救治大三阳外,建议患者在日常生活中要留意饮食,并且要始终保持愉悦的心情,以此来增强体质放慢康复速度。乙肝大三阳这种疾病不容易治愈,建议去正规专业的医院进行治疗,有不适的地方要尽快到医院进行治疗。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:上个月我被查出来有严重高血压,他们叫我注意一些东西,我当时忘了。您好,高血压忌什么?", + "output": "高血压日常饮食建议采用清淡饮食,除了限盐以外饮食没特别的影响,高血压是具有心血管很常见的疾病,需要有长期口服药物治疗。平时需要有适当的活动,可以增进身体的血压平稳,高血压不及时治疗,容易引来中风的情况再次发生。中风就比较麻烦了。平时要注意个人生活习惯,饮食要健康。" + }, + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:吃一点儿东西胃就胀;;不好吸收;;而且体重绝不会增强;;", + "output": "有胃肠炎吗?糯米做的东西和硬的食物别吃多活动和吃水果有助消化少吃多餐胃胀这种疾病不易康复。患者朋友理应保持良好的心态,用积极的心态去直面它,只有这样才能降低患者抗衡胃胀的信心,这样一定能获得康复。此外还需要建议患者多加注意休息,不能吃辛辣刺激的食物。" + }, + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:胃胀,恶心,打嗝,感觉无法吸收食物。", + "output": "从你叙述的情况看,主要考虑是慢性胃炎的症状。建议最好是去医院做个胃镜看下,主要是要剔除胃癌等特殊情况,胃胀病情严重就要立即对症救治,恰当用药,否则会致使胃胀再次病发,除此之外,患者还需要有恰当饮食,始终保持心情愉快,此外还需要建议患者多加注意休息,不要吃辛辣刺激的食物。" + }, + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:我有浅表性胃炎和萎缩性胃炎幽门螺杆菌300以上是阳性怎么办?", + "output": 
"有幽门螺杆菌感染救治当抗幽门螺杆菌建议三联疗法火四联疗法:两种抗生素奥美拉唑或两种抗生素奥美拉唑胃粘膜守护药除了及时救治胃炎外,患者朋友理应始终保持愉快的心态去直面疾病,只有这样才能降低患者的免疫力以及抗衡疾病的信念,同时要多看重自身饮食护理,多观注自身的症状变动,认为这样一定能将胃炎撵走。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:外公一直在用降压药,不知晓可不可以吃蜂王浆,蜂王浆高血压能吃吗?", + "output": "您好蜂王浆这种保健品对于高血压患者是可以吃的,它对血压没下降作用,但是同时也不减低血压。老年人喝蜂王浆可以消化体内需要有的营养物质,对人体是有一定的好处的,对高血压患者需要有长期注意低盐低脂饮食,把烟酒癖好都要戒掉,要有规律的活动。以上可以做为参考。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:我和男朋友相识4年,很恩爱,但是他有乙肝,和他做爱我会被感染吗", + "output": "如果有口腔溃疡就有可能感染的你可以打乙肝疫苗的,只要打了乙肝疫苗产生了抗体,就用不着忧心被感染了针对患者来说,肝病病情严重就要及时对症救治,恰当用药,否则会致使肝病再次病发,除此之外,患者还需要有恰当饮食,始终保持心情愉快,期望上述的问题答案可以协助到大家,谢谢。" + }, + { + "instruction": "你是一个神经科医生,请根据患者的问题给出建议:马上就要工作了但是还是自己感觉不信心虽然已经治疗过了又难过病发,癫痫能全愈吗", + "output": "癫痫病神经系统疾病,大多数的癫痫病需要有长期口服抗癫痫药物来控制。建议采用中西结合进行治疗,治疗癫痫病主要是预防出现意外伤害的再次发生,最好平时有家人陪伴。可以互相配合中医来辨证治疗的,平时应当饮食清淡不要吃辛辣油腻刺激性食物,不要有吸烟喝啤酒的不良习惯。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我有些高血压,学校近期要准备体测了,您好医生,高血压可以免体测吗?", + "output": "这个年龄再次出现高血压的情况,要戒备是否有肾性高血压的情况,注意仔细检查尿液和肾功能情况,测血压是可以半价的,到医院或学校医务室去检验血压,但是仔细检查血压前应休息半个小时左右,最好是早上监测数据比较准确,有必要做进一步仔细检查推测,注意情绪和饮食的调理。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我老公患高血压,这两天一直头痛,休息的时候也很难睡过去,医生,高血压会不会引来头痛?", + "output": "高血压是可以引来头痛的,一般高血压的主要表现出就是头痛头晕胸闷。有的也有可能会再次出现心慌。高血压引来头痛,主要是引来大脑内血压下降而引来颅内压下降致使的。治疗这种情况的话,主要是使用降压药物以及减低颅内压的药物实施治疗,最好是在医师的指导下使用。" + }, + { + "instruction": "你是一个呼吸科医生,请根据患者的问题给出建议:风寒感冒咳白痰怎么治疗?", + "output": "风寒感冒,咳有白痰的患者,主要症咳嗽痰多色白量多,兼具胸脘作闷,鼻塞,流清涕,全身酸痛,易疲劳等症状。临床诊断上常用杏苏散和二陈丸实施治疗。在用药物治疗的同时,应忌食辛辣刺激性的食物和寒凉性食物,多吃易消化营养丰富的食物,宜清淡饮食,留意歇息。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:健康检查时医生说呈阳性,可能会是曱肝,您好您好曱肝与乙肝有区别吗?这个病严重吗?", + "output": "甲肝是由甲型肝炎病毒引来的,以肝脏炎症炎症为基础的传染病,主要通过粪-口途径散播,临床诊断上以疲乏,食欲减退,肝肿大,肝功能异常为主要表现出,部分病例再次出现黄疸,主要表现出为急性肝炎,无症状感染者常见。任何年龄均可患本病,但主要为儿童和青少年" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我的老哥吧,他高血压拆分溃疡啊,我想问一下高血压拆分溃疡是怎么回事?", + "output": "高血压这种疾病再次出现问题的时候,一般对身体影响是比较大的,影响面也是比较广的,所以说有可能会引来一些变动的情况,而这种情况来说引来溃疡的几率非常低,不过如果一旦引来溃疡的问题,还是应当及时的减低血压,然后再治疗溃疡的问题。调节身心健康,保持自身卫生.避免因抵抗力下降而导致细菌入侵。" + }, + { + 
"instruction": "你是一个神经科医生,请根据患者的问题给出建议:其实是比较难受的,请问得癫痫病的人有有多少?", + "output": "癫痫的病发病因目前尚不完全明确。癫痫分成原发性癫痫和继发性癫痫。原发性癫痫可能会是由于隔代遗传因素,疾病,中毒,脑部发育不全等。不同的年龄段病发原因也各不相同。但是得了癫痫病不要太过紧张,目前为止癫痫病绝大部分都可以获得很好的控制。有的口服抗癫痫药物后甚至可以获得全愈。" + }, + { + "instruction": "你是一个神经科医生,请根据患者的问题给出建议:我感觉我家孩子的症状有点像癫痫,您好癫痫的自愈价格是多少?贵不贵?", + "output": "癫痫这种疾病由于是一种脑部神经网络生长发育异常的疾病,对于这种疾病现在还没有一次性完全治疗痊愈的药物,所以现在还不好确认治疗癫痫全愈的价格,这种疾病是需要有长期规律口服药物,所以长期接下来治疗的话是需要有一定的费用的。如果患癫痫这种疾病,一定要尽早去医院实施控制治疗。" + }, + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:胃胀有食道裂孔疝十二指肠溃疡近期可能是焦虑症老胃胀", + "output": "你好!你的情况建议你用传统中药黑矾,黑枣,核桃仁,栀子,茯苓,砂仁,厚朴,三棱,穿山甲,寸曲,麦芽,上甲,下甲,红花,沉,铁胆粉,蜂胶,蜂蜜,蜂蜡救治,可以快速自愈;期望你正确的救治,早日康复。胃胀患者的日常救治主要是服食药物,患者可以用口服药物,互相配合维生素,而且留意规律作息,禁酒酒" + }, + { + "instruction": "你是一个神经科医生,请根据患者的问题给出建议:腹部癫痫是什么?我的小腹总是胀痛,经常都这样,也没吃什么不对的东西。", + "output": "腹部癫痫是一种以腹部症状为临床表现的癫痫。主要表现出为腹痛为突然复发或者是突然停止下来。常伴发恶心干呕,恶心呕吐等症状,复发时无意识失去。在治疗上主要是给患者口服抗癫痫的药物以及缓解并发症的症状,具体药物的用法用量,要在临床诊断医生的指导下运用。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:如果我要检查艾滋、乙肝、丙肝?", + "output": "艾滋、乙肝、丙肝检验,抽一次静脉血,检查个传染系列即可。艾滋、乙肝、丙肝检验,抽一次静脉血,检查个传染系列即可。抽血所用器具都是一人个、单人单用,是绝不会感染的。传染系列的价格在300约莫。其实得了丙肝并不可怕,患者要认为自己是可以战胜病魔的,只要及时发现症状,并且对症救治,压制病情及康复的几率都非常大。" + }, + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:干呕涨肚咳迷糊晕症状是不是胃病着凉因数啊?请指教谢谢", + "output": "考量胃炎及胃肠动力较差所致,可有腹痛腹痛,恶心干呕等一连串症状。建议饮食清淡,多饮水,留意歇息,可以服食抑酸药和胃动力药物纾解,积极救治感冒、。胃病患者在对症救治之外,患者在生活中还需要有留意始终保持恰当饮食的好习惯,消化身体营养,期望上述的答案可以协助到你,谢谢!" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:最近公司健康检查,查出来乙肝表面抗体是阳性是正常的吗?会不会是没抗体?", + "output": "您好朋友根据您的叙述症状,乙肝抗体阳性证明身体具备抵抗力。您好朋友根据您的叙述症状,证明您的身体具备乙肝抗体没问题,拥有健康的身体。乙肝患者的日常救治主要是对症救治,患者可以用许多口服药物,互相配合许多维生素,而且留意规律作息,禁烟酒,期望乙肝患者可以尽快康复!谢谢!" 
+ }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:100-160高血压吃什么药?父亲以前没发现自己得了高血压,现在应当吃什么药呢?", + "output": "根据您目前叙述的情况,目前主要是发觉血压下降,您目前叙述的血压下降数值还是比较明显的达到高血压二级,再次出现这种情况容易致使心脑血管风险,要增强,建议可以在医生指导下口服钙离子拮抗剂来减低血压,平时建议注意休息,祝您健康。以上可以做为参考。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:乙肝五项结果245呈阳性打过五次乙肝疫苗,这样的结果是传染乙肝了吗", + "output": "你好!根据你的报告单来看结果是你曾经有传染过乙肝病毒!现在的结果是有了抗体!而不是传染乙肝病毒。所以用不着忧心。期望我的解惑对你有所协助乙肝的治疗方法有许多,但是由于患者病情不同所以采用的诊病方法也就不一样,因此患者需要及时检查诊断,方才能对症救治。。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:乙肝;肝硬化;肝腹水;脂肪肝", + "output": "乙肝的散播途径主要有三条,性传播、血液散播、母婴横向散播,如果再次出现肝硬化、肝腹水,考量有乙肝病毒拷贝,这时通过性交是可以传染给对方的。除了救治乙肝外,建议患者在日常生活中要留意饮食,并且要始终保持舒畅的心情,以此来增强体质放慢康复速度。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我有个姐姐她这么年轻就有高血压,而且已经血管破裂,高血压引来血管破裂怎么办?", + "output": "高血压引来血管破裂多是致使脑出血,脑出血的原因大多数是老龄、高血压和动脉硬化所引发的脑出血。年轻人脑出血,若无基础疾病及外伤,一般多考虑脑血管疾病,如动脉瘤,动静脉畸形等,除了血液性疾病,如白血病引发的出血性脑卒中。新生儿脑出血主要是由于孕期的维生素K缺乏和产伤引发的脑出血。" + }, + { + "instruction": "你是一个神经科医生,请根据患者的问题给出建议:我有个朋友,他得了癫痫症,最近他还复发了,您好癫痫类型有哪些?", + "output": "癫痫类型有全部性的,的局部性的,有完全性的,一般患了癫痫病主要要作好防御措施才行的,因为这种病复发下来哪里倒地,会伤到自已的身体,所以家人一定要注意了,防复发时没人在身边引发不急时治疗引发生命危险的,所以癫痫病有很多种不同,也不知晓什么时候复发的。" + }, + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:可以治好我的胃胀胃炎吗", + "output": "你的情况考虑是慢性胃炎或者消化性溃疡引来的,需要有积极胃镜仔细检查看一看。可以服食药物看看。平时留意规律饮食,禁酒酒,禁极冷酸辣食物,不要暴饮暴食胃胀的治疗方法有,但是由于患者病情不同所以采用的诊病方法也就不一样,因此需要有患者尽快诊断,方才能对症下药。。" + }, + { + "instruction": "你是一个呼吸科医生,请根据患者的问题给出建议:感冒咳胸部麻木15天是怎么回事?", + "output": "感冒咳再次出现胸部麻木的症状,一般都是由于遭到了一定的影响才会引来的,这时候有可能是长时间的异常受力引来了局部神经损伤,或者是说因为长期的咳所引来的,所以说这种情况只有咳停止下来之后,症状才会慢慢彻底恢复,一般可以使用甘草片或者是清肺糖浆来治疗。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我最近身体不舒服,看见电视上有护理高血压的情景剧,所以我想您好护理高血压情景剧是真的吗?", + "output": "您好,高血压情景剧毕竟只是情景剧,而不是科教片,所以,里面所说的造成护理高血压的方法,不能够完全误信,还是要遵从专业医生的建议。建议高血压患者平时饮食宜清淡,不要过咸及油腻食物,适当多吃蔬菜水果,以防情绪震荡,不要过度操劳,适当运动,控制体重。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我平时很喜欢吃甜的食物,最近被查出来患高血压,您好医生高血压可以吃甜的吗?", + "output": 
"高血压可以吃甜食的,但是经常吃甜食有利于身体健康,有可能会引来糖尿病的,所以在平时需要有低糖饮食,低盐饮食,预防工作高血压的缓解,对于高血压,相信与隔代遗传,身体肥胖也是有关系,所以需要有尽快积极的治疗,可以口服硝苯地平缓释片,对身体是有好处的。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:小孩3岁了,如何以防肝炎感染?因为我们都有肝炎,要常给孩子做健康检查吗?", + "output": "肝炎的类型比较多,您好您具体是哪种类型的肝炎?甲肝,乙肝,还是丙肝?肝炎患者的日常救治主要是服食药物,患者可以用许多口服药物,互相配合许多维生素,而且留意自身护理,恰当饮食,防止寒冷食物,期望肝炎患者可以尽快康复!建议你带孩子去正规专业的医院进行救治。" + }, + { + "instruction": "你是一个神经科医生,请根据患者的问题给出建议:我想理解一下治疗癫痫病的有几种?可有可以一次性就全愈的治疗方法?", + "output": "癫痫在全国范围之内都是个非常棘手的疾病,因为他没很好的方法能治疗痊愈,只好通过药物控制来缓解癫痫的复发,如果患者一年复发低于两次的话,就建议服药来控制,因为癫痫的复发实际上症状比较可怕,会给患者引发极大的心理干扰。目前并没有个药物能完全一次性治愈癫痫。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:已经怀孕2个月了,在婚检的时候查的出的可能会患小三阳。", + "output": "只要经过救治,肝功正常,病毒量阴性就是可以要孩子的。针对患者来说,肝病病情严重就要及时对症救治,恰当用药,否则会致使肝病再次病发,除此之外,患者还需要有看重自身的饮食以及心理护理,建议时刻保持良好的心态,期望上述的问题答案可以协助到大家,谢谢。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:小三阳患者谷丙转氨酶75dna大于10的5次方怎么办", + "output": "治乙肝西医目前没特效药,中医中药长期临床实践累积了许多独特的奇方秘方,建议你用传统中药!对于肝病严重患者来说,建议马上就诊,根据医生的意见来马上救治,不要盲目误信广告药物来救治,以免令得病情严重,以上意见仅供参考。望上述的答案可以协助到您,谢谢。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:最近肚脐右边随呼息有一点痛.而且右边稍微大于左边.严重吗?", + "output": "找为好中医,看一看病根在哪,从根本上救治,坚持下来有自愈的可能性除了及时救治肝病外,患者朋友理应始终保持积极的心态去直面疾病,只有这样才能令得患者及时对症救治,同时要多看重自身饮食护理,观注营养均衡,及时消化身体营养,认为这样一定能将肝病撵走。" + }, + { + "instruction": "你是一个神经科医生,请根据患者的问题给出建议:总是不舒服,难受,睡不着呢。抽动症和癫痫有什么区别呢?", + "output": "您好,很高兴为您解惑。抽动症症状更轻许多,一般不会引来神智发生改变,不会突然倒地及尿便失禁,不会有意识失去。而癫痫大发作多会再次出现短暂的意识丧失,牙关紧闭,口吐白沫,尿便失禁,痛觉彻底恢复后无法正确叙述。脑电图检查可以甄别,抽动症孩子绝大部分脑电图正常,而癫痫儿童则会再次出现脑电图异常。" + }, + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:慢性胃炎可以通过活动稳定吗?因为我经常暧气,喝药效果不大", + "output": "留意口腔卫生,口服消炎药,局部留意清洁建议及时实施对症救治,多喝水,留意防寒,清淡容易吸收的食物其实得了胃炎并不可怕,患者要认为自己是可以战胜病魔的,只要及时发现症状,并且对症救治,纾解病情及康复的几率都非常大。用些消炎牙膏刷牙,早晚个刷一次牙,保持口腔卫生。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我妈妈血压一直有点高,可以吃三七粉的吗?高血压吃三七粉可以吗?", + "output": "高血压可以吃三七粉的,适当的吃一些是没关系的,用不着太担心,但是三七粉对于高血压患者来说并不能起些治疗的作用,建议高血压患者平时还是要注意监控器血压,不定期的测定血压,在口服药物的同时在饮食方面尽量要办到低脂低盐的饮食方式,多吃一些新鲜的蔬菜和容易吸收的食物" + }, + { + "instruction": 
"你是一个神经科医生,请根据患者的问题给出建议:各位医生好,这几天腿一直抽动,晚上特别严重,想问问最近腿老是抽动,抽动是癫痫吗?", + "output": "单纯的腿抽筋、抽动,不像是癫痫致使的,而像是缺钙引来的或者受寒引来的,平时需要有留意防寒,多歇息才可以的。建议,这种情况可以在防寒的基础上口服钙剂或者含钙的口服液,忌服辛辣、受寒的食物,多吃香蕉,多吃粗粮。严重的抽动则需要有去医院检验、诊病。" + }, + { + "instruction": "你是一个消化科医生,请根据患者的问题给出建议:C14幽门螺杆菌9.70是拥有正常?还是严重还是算不上很高?", + "output": "碳14呼气试验的参考值大概是在40,有的参考值在100。如果低于上述的数值,都是阴性的。所以你的9.70是阴性的。证明没幽门螺杆菌的传染。所以是正常的。幽门螺杆菌病情很严重,建议患者马上实行救治,期望患者可以根据医生的意见对症救治。同时看重饮食问题,防止辛辣打击食物。" + }, + { + "instruction": "你是一个肝病科医生,请根据患者的问题给出建议:我上次健康检查了,乙肝表面抗体是阳性,结果说:乙肝病毒已产生一定免疫力是什么意思", + "output": "两对半第二项阳性对此你体内有保护性的抗体,麻醉乙肝疫苗就是使这项转成阳性,是正常的。肝病患者在对症救治之外,患者在生活中还需要有留意要保持良好的心情,好的心情对疾病的彻底恢复很有协助,建议去正规专业的医院进行救治,期望上述的答案可以协助到你" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我高血压两年了,一直忍耐喝药,感觉有点虚,想吃点参补一补,讨教下高血压可以吃丹参粉吗?", + "output": "高血压是可以喝丹参粉的,但是一定要不要过量。一天极少量喝是可以的,它是活血化瘀的药物,能稳定机体内血流情况减低血粘度,减低外周血管阻力,从而使血压下降有辅助降压的效用,但是一定要适量不要过量。平时低盐低脂饮食,少吃油腻油炸食物,饮食以清淡为基础。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:老公最近因为高血压头晕的厉害还伴发头连续个礼拜了还没有转想问问高血压引来的头晕该怎么办?", + "output": "高血压是指动脉内血液压力过高。本病是临床诊断常见的全身血管性疾病。如果头晕是高血压引来那首先需要有控制血始终保持血压平稳。血压控制住头晕慢慢的也就可以缓解。建议遵遵医嘱长期服食降压药不随意加减药物剂留意药物的不良反再次出现不适症状及时复诊。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:这两年年纪大了,血压也已经开始下跌,据说叶酸可以降血压,您好高血压吃叶酸有效吗?", + "output": "如果血压下降的话建议你先通过仔细检查明确是原发性高血压还是继发性高血压的,其中高血压类型有一种是拥有叶酸缺乏性的高血压叫作h型高血压,通过仔细检查同型半胱氨酸可以明确的,如果仔细检查明确后,就要通过消化叶酸治疗了,同时也要口服降压药物治疗的,平时要监测数据血压水平。" + }, + { + "instruction": "你是一个心血管科医生,请根据患者的问题给出建议:我患高血压五六年啦,天天喝药吃烦啦,哪种东西能根治高血压,高血压克星是什么?", + "output": "高血压的患者可以吃许多新鲜的水果蔬菜或者是芹菜山药之类的食物,可以起些降血压的作用,另外高血压的患者平时也应当注意低盐,低脂,低胆固醇饮食,适当的实施体育运动和锻练高血压的患者还应当在医生的指导下口服降血压的药物,断然不可擅自停药,防止对血压引发影响。" + } +] diff --git a/tests/test_data/experiment_train/train.py b/tests/test_data/experiment_train/train.py new file mode 100644 index 0000000..98ce5a7 --- /dev/null +++ b/tests/test_data/experiment_train/train.py @@ -0,0 +1,6 @@ +def run(): + print("hello world") + + +if __name__ == "__main__": + run() diff --git a/tests/test_data/read_mc_table/run.py b/tests/test_data/read_mc_table/run.py 
index faf0a59..915916d 100644 --- a/tests/test_data/read_mc_table/run.py +++ b/tests/test_data/read_mc_table/run.py @@ -1,5 +1,6 @@ import json import os +from itertools import islice from odps import ODPS from odps.accounts import StsAccount @@ -40,7 +41,7 @@ def read_table(): o = ODPS(account=account, project=project_name, endpoint=endpoint) # 读取输入表数据 - for record in o.read_table(table_name): + for record in islice(o.read_table(table_name), 20): print(record) diff --git a/tests/unit/test_inference_spec.py b/tests/unit/test_inference_spec.py index 49733ce..6e254b7 100644 --- a/tests/unit/test_inference_spec.py +++ b/tests/unit/test_inference_spec.py @@ -12,13 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pai.exception import DuplicatedMountException, MountPathIsOccupiedException -from pai.model import InferenceSpec +from pai.exception import DuplicatedMountException +from pai.model import ( + InferenceSpec, + NfsStorageConfig, + NodeStorageConfig, + OssStorageConfig, + RawStorageConfig, + SharedMemoryConfig, + container_serving_spec, +) from tests.unit import BaseUnitTestCase class TestInferenceSpec(BaseUnitTestCase): - def test_inference_spec(self): + def test_add_options(self): infer_spec = InferenceSpec( processor="pmml", ) @@ -37,6 +45,10 @@ def test_inference_spec(self): infer_spec.add_option("metadata.rpc.batching", True) self.assertEqual(infer_spec.metadata.rpc.batching, True) + def test_mount_storage(self): + infer_spec = InferenceSpec( + processor="pmml", + ) infer_spec.storage = [ { "mount_path": "/ml/model/", @@ -54,14 +66,6 @@ def test_inference_spec(self): d, { "processor": "pmml", - "metadata": { - "instance": 2, - "rpc": { - "keepalive": 10000, - "batching": True, - }, - }, - "name": "example", "storage": [ { "mount_path": "/ml/model/", @@ -90,7 +94,83 @@ def test_inference_spec(self): "oss://pai-sdk-example/path/to/model/", mount_path="/ml/world/" ) - with 
self.assertRaises(MountPathIsOccupiedException): - infer_spec.mount( - "oss://pai-sdk-example/path/to/abc/", mount_path="/ml/code/" - ) + infer_spec.mount( + "oss://pai-sdk-example/path/to/abc/edfg", mount_path="/ml/code/" + ) + + def test_set_model(self): + infer_spec = container_serving_spec( + command="python3 /ml/code/model.py", + image_uri="python:3", + ) + infer_spec.storage = [ + { + "mount_path": "/ml/code/", + "oss": { + "path": "oss://pai-sdk-example/path/to/code/", + }, + }, + ] + model_path_v1 = "oss://pai-sdk-example/path/to/model/v1/" + infer_spec.set_model_data(model_path_v1) + self.assertEqual(model_path_v1, infer_spec.storage[1].oss.path) + + model_path_v2 = "oss://pai-sdk-example/path/to/model/v2/" + infer_spec.set_model_data(model_path_v2) + self.assertEqual(model_path_v2, infer_spec.storage[1].oss.path) + + def test_storage(self): + infer_spec = container_serving_spec( + command="python3 /ml/code/model.py", + image_uri="python:3", + storage_configs=[ + OssStorageConfig( + mount_path="/ml/model/", + oss_path="oss://pai-sdk-example/path/to/model/", + ), + NfsStorageConfig( + mount_path="/ml/shared/", + nfs_server="nfs://abc", + nfs_path="/path/to/shared/", + ), + SharedMemoryConfig(size_limit=64), + NodeStorageConfig(mount_path="/ml/disk/"), + RawStorageConfig( + config={ + "image": { + "image": "MyImageUri", + "path": "/path/to/mount/", + }, + "mount_path": "/data_image", + } + ), + ], + ) + + truth = [ + { + "mount_path": "/ml/model/", + "oss": {"path": "oss://pai-sdk-example/path/to/model/"}, + }, + { + "mount_path": "/ml/shared/", + "nfs": { + "path": "/path/to/shared/", + "readOnly": False, + "server": "nfs://abc", + }, + }, + { + "empty_dir": {"medium": "memory", "size_limit": 64}, + "mount_path": "/dev/shm", + }, + {"empty_dir": {}, "mount_path": "/ml/disk/"}, + { + "image": { + "image": "MyImageUri", + "path": "/path/to/mount/", + }, + "mount_path": "/data_image", + }, + ] + self.assertListEqual(truth, infer_spec.storage) diff --git 
a/tests/unit/test_logging.py b/tests/unit/test_logging.py new file mode 100644 index 0000000..5875015 --- /dev/null +++ b/tests/unit/test_logging.py @@ -0,0 +1,54 @@ +# Copyright 2023 Alibaba, Inc. or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging + +from pai.common.logging import ( + _reset_library_root_logger, + get_log_level, + get_logger, + set_log_level_debug, + set_log_level_info, +) + +from .utils import mock_env + + +@mock_env(PAI_LOG_LEVEL="DEBUG") +def test_log_level(): + _reset_library_root_logger() + + assert get_log_level() == logging.DEBUG + + +@mock_env(PAI_LOG_LEVEL="INFO") +def test_get_logger(): + _reset_library_root_logger() + + lib_root_logger = get_logger() + logger = get_logger("pai.abc") + + assert logger.parent == lib_root_logger + assert lib_root_logger.getEffectiveLevel() == logging.INFO + + +def test_set_log_level(): + _reset_library_root_logger() + + assert get_log_level() == logging.WARNING + + set_log_level_info() + assert get_log_level() == logging.INFO + + set_log_level_debug() + assert get_log_level() == logging.DEBUG diff --git a/tests/unit/test_pipeline/test_pipeline.py b/tests/unit/test_pipeline/test_pipeline.py index a09fd6a..c16df4d 100644 --- a/tests/unit/test_pipeline/test_pipeline.py +++ b/tests/unit/test_pipeline/test_pipeline.py @@ -50,7 +50,7 @@ def test_io_name_conflict(self): }, ) - with self.assertRaisesRegexp(ValueError, ".*conflict.*") as _: + with self.assertRaisesRegex(ValueError, 
".*conflict.*") as _: step1 = op.as_step(name="step1") step2 = op.as_step(name="step2") _ = Pipeline( diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index f0f9b15..0c3031e 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -14,12 +14,16 @@ import json import tempfile -from unittest.case import TestCase +from unittest.mock import patch -from pai.session import Session +from alibabacloud_credentials.credentials import Credential +from pai.session import Session, _init_default_session_from_env +from tests.unit import BaseUnitTestCase +from tests.unit.utils import mock_env -class TestSession(TestCase): + +class TestSession(BaseUnitTestCase): def test_save_config(self): d = { "region_id": "cn-hangzhou", @@ -35,3 +39,35 @@ def test_save_config(self): res = json.load(f) self.assertEqual(res, d) + + @mock_env(DSW_INSTANCE_ID="dsw-378f4930c04191016") + @mock_env(PAI_WORKSPACE_ID="1234567") + @mock_env(REGION="cn-hangzhou") + def test_init_default_session_from_env_in_dsw(self): + mock_cred = Credential() + with patch( + "pai.session.Session._get_default_credential_client", return_value=mock_cred + ): + with patch( + "pai.session.Session.get_default_oss_storage", + return_value=("bucket", "endpoint"), + ): + s = _init_default_session_from_env() + self.assertEqual(s.workspace_id, "1234567") + self.assertEqual(s.region_id, "cn-hangzhou") + + @mock_env(DLC_JOB_ID="dlcv25vrbljblbgh") + @mock_env(PAI_WORKSPACE_ID="1234567") + @mock_env(REGION="cn-shanghai") + def test_init_default_session_from_env_in_dlc(self): + mock_cred = Credential() + with patch( + "pai.session.Session._get_default_credential_client", return_value=mock_cred + ): + with patch( + "pai.session.Session.get_default_oss_storage", + return_value=("bucket", "endpoint"), + ): + s = _init_default_session_from_env() + self.assertEqual(s.workspace_id, "1234567") + self.assertEqual(s.region_id, "cn-shanghai") diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py 
index 62d2298..5a8e533 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -19,7 +19,19 @@ import os from pai.common.oss_utils import is_oss_uri -from pai.common.utils import generate_repr, is_filesystem_uri, is_odps_table_uri +from pai.common.utils import ( + generate_repr, + is_filesystem_uri, + is_odps_table_uri, + parse_bmcpfs_uri, + parse_cpfs_uri, + parse_local_file_uri, + parse_nas_uri, + parse_odps_uri, + parse_oss_uri, + parse_pai_dataset_uri, + parse_region_id_from_endpoint, +) from tests.test_data import SCRIPT_DIR_PATH from tests.unit import BaseUnitTestCase from tests.unit.utils import extract_odps_table_info, file_checksum @@ -197,3 +209,190 @@ def test_is_filesystem_uri(self): with self.subTest(tc=tc): result = is_filesystem_uri(tc["arguments"]["uri"]) self.assertEqual(result, tc["expected"]) + + def test_parse_region_id_from_valid_endpoint(self): + self.assertEqual(parse_region_id_from_endpoint("cn-shanghai"), "cn-shanghai") + self.assertEqual( + parse_region_id_from_endpoint("cn-shanghai-internal"), "cn-shanghai" + ) + self.assertEqual( + parse_region_id_from_endpoint("cn-shanghai-internal.aliyuncs.com"), + "cn-shanghai", + ) + + def test_parse_region_id_from_invalid_endpoint(self): + self.assertIsNone(parse_region_id_from_endpoint("http://someotherendpoint.com")) + self.assertIsNone(parse_region_id_from_endpoint("")) + self.assertIsNone(parse_region_id_from_endpoint(None)) + + def test_parse_valid_oss_uri(self): + self.assertEqual( + parse_oss_uri( + "oss://test-bucket.oss-cn-hangzhou.aliyuncs.com/models/ALBERTv2-Chinese" + "-NewsBase.pth" + ), + ("test-bucket", "cn-hangzhou", "models/ALBERTv2-Chinese-NewsBase.pth"), + ) + self.assertEqual( + parse_oss_uri( + "oss://test-bucket.oss-cn-hangzhou-internal.aliyuncs.com/models/ALBERTv2-Chinese-NewsBase.pth" + ), + ("test-bucket", "cn-hangzhou", "models/ALBERTv2-Chinese-NewsBase.pth"), + ) + self.assertEqual( + parse_oss_uri("oss://test-bucket.oss-cn-hangzhou.aliyuncs.com/"), + 
("test-bucket", "cn-hangzhou", "/"), + ) + self.assertEqual( + parse_oss_uri("oss://test-bucket.oss-cn-hangzhou.aliyuncs.com"), + ("test-bucket", "cn-hangzhou", "/"), + ) + + def test_parse_invalid_oss_uri(self): + self.assertIsNone( + parse_oss_uri("oss://test-bucket/models/ALBERTv2-Chinese" "-NewsBase.pth") + ) + self.assertIsNone(parse_oss_uri("oss://")) + self.assertIsNone(parse_oss_uri("")) + + def test_parse_valid_nas_uri(self): + self.assertEqual( + parse_nas_uri( + "nas://007636fd-gfyy.cn-hangzhou.extreme.nas.aliyuncs.com/mnt/foo/" + ), + ( + "nas://007636fd-gfyy.cn-hangzhou.extreme.nas.aliyuncs.com/mnt/foo/", + "cn-hangzhou", + ), + ) + self.assertEqual( + parse_nas_uri("nas://007636fd-gfyy.cn-hangzhou.extreme.nas.aliyuncs.com/"), + ( + "nas://007636fd-gfyy.cn-hangzhou.extreme.nas.aliyuncs.com/", + "cn-hangzhou", + ), + ) + self.assertEqual( + parse_nas_uri("nas://007636fd-gfyy.cn-hangzhou.extreme.nas.aliyuncs.com"), + ("nas://007636fd-gfyy.cn-hangzhou.extreme.nas.aliyuncs.com", "cn-hangzhou"), + ) + self.assertEqual( + parse_nas_uri("nas://066e54a580.cn-hangzhou/"), + ("nas://066e54a580.cn-hangzhou/", "cn-hangzhou"), + ) + + def test_parse_invalid_nas_uri(self): + self.assertIsNone( + parse_nas_uri("nas://007636fd-gfyy.extreme.nas.aliyuncs.com/mnt/foo/") + ) + self.assertIsNone(parse_nas_uri("nas://")) + self.assertIsNone(parse_nas_uri("")) + + def test_parse_valid_cpfs_uri(self): + self.assertEqual( + parse_cpfs_uri( + "cpfs://cpfs-00f4b992044a71be.cn-hangzhou/ptc-008727a69e07d3cf/exp-00d695a1b9f6c926/" + ), + ( + "cpfs://cpfs-00f4b992044a71be.cn-hangzhou/ptc-008727a69e07d3cf/exp-00d695a1b9f6c926/", + "cn-hangzhou", + ), + ) + self.assertEqual( + parse_cpfs_uri("cpfs://cpfs-00f4b992044a71be.cn-hangzhou/"), + ("cpfs://cpfs-00f4b992044a71be.cn-hangzhou/", "cn-hangzhou"), + ) + self.assertEqual( + parse_cpfs_uri("cpfs://cpfs-00f4b992044a71be.cn-hangzhou"), + ("cpfs://cpfs-00f4b992044a71be.cn-hangzhou", "cn-hangzhou"), + ) + + def 
test_parse_invalid_cpfs_uri(self): + self.assertIsNone(parse_cpfs_uri("cpfs://cpfs-00f4b992044a71be/mnt/foo/")) + self.assertIsNone(parse_cpfs_uri("cpfs://")) + self.assertIsNone(parse_cpfs_uri("")) + + def test_parse_valid_bmcpfs_uri(self): + self.assertEqual( + parse_bmcpfs_uri( + "bmcpfs://cpfs-291070fd9529c747-000001.cn-wulanchabu.cpfs.aliyuncs.com/sub/dir" + ), + ( + "bmcpfs://cpfs-291070fd9529c747-000001.cn-wulanchabu.cpfs.aliyuncs.com/sub/dir", + "cn-wulanchabu", + ), + ) + self.assertEqual( + parse_bmcpfs_uri( + "bmcpfs://cpfs-291070fd9529c747-000001.cn-wulanchabu.cpfs.aliyuncs.com/" + ), + ( + "bmcpfs://cpfs-291070fd9529c747-000001.cn-wulanchabu.cpfs.aliyuncs.com/", + "cn-wulanchabu", + ), + ) + self.assertEqual( + parse_bmcpfs_uri( + "bmcpfs://cpfs-291070fd9529c747-000001.cn-wulanchabu.cpfs.aliyuncs.com" + ), + ( + "bmcpfs://cpfs-291070fd9529c747-000001.cn-wulanchabu.cpfs.aliyuncs.com", + "cn-wulanchabu", + ), + ) + + def test_parse_invalid_bmcpfs_uri(self): + self.assertIsNone( + parse_bmcpfs_uri("bmcpfs://cpfs-291070fd9529c747-000001.cpfs.aliyuncs.com/") + ) + self.assertIsNone(parse_bmcpfs_uri("bmcpfs://")) + self.assertIsNone(parse_bmcpfs_uri("")) + + def test_parse_valid_local_file_uri(self): + self.assertEqual( + parse_local_file_uri("file:///mnt/dataset_123-456+789"), + "/mnt/dataset_123-456+789", + ) + self.assertEqual( + parse_local_file_uri("file:///mnt/dataset 123"), "/mnt/dataset 123" + ) + self.assertEqual(parse_local_file_uri("file:///"), "/") + + def test_parse_invalid_local_file_uri(self): + self.assertIsNone(parse_local_file_uri("file://mnt/dataset")) + self.assertIsNone(parse_local_file_uri("file://")) + self.assertIsNone(parse_local_file_uri("")) + + def test_parse_valid_pai_dataset_uri(self): + self.assertEqual( + parse_pai_dataset_uri("pai://datasets/d-123456"), ("d-123456", "1") + ) + self.assertEqual( + parse_pai_dataset_uri("pai://datasets/d-123456/2"), ("d-123456", "2") + ) + self.assertEqual( + 
parse_pai_dataset_uri("pai://datasets/d-123456/2/3"), ("d-123456", "2") + ) + + def test_parse_invalid_pai_dataset_uri(self): + self.assertIsNone(parse_pai_dataset_uri("pai://datasets/")) + self.assertIsNone(parse_pai_dataset_uri("pai://")) + self.assertIsNone(parse_pai_dataset_uri("")) + + def test_parse_valid_odps_uri(self): + self.assertEqual( + parse_odps_uri( + "odps://project_mc/schema1/tables/flow_model_label_table_v1" + ), + ("project_mc", "schema1", "flow_model_label_table_v1"), + ) + self.assertEqual( + parse_odps_uri("odps://project_mc/tables/flow_model_label_table_v1"), + ("project_mc", None, "flow_model_label_table_v1"), + ) + + def test_parse_invalid_odps_uri(self): + self.assertIsNone(parse_odps_uri("odps://project_mc/tables/")) + self.assertIsNone(parse_odps_uri("odps://project_mc/")) + self.assertIsNone(parse_odps_uri("odps://")) + self.assertIsNone(parse_odps_uri("")) diff --git a/tests/unit/utils.py b/tests/unit/utils.py index d5bfff4..3aa16ec 100644 --- a/tests/unit/utils.py +++ b/tests/unit/utils.py @@ -18,6 +18,7 @@ import os import re +import mock import six import yaml @@ -150,3 +151,8 @@ def file_checksum(file_name, hash_type="md5"): for chunk in iter(lambda: f.read(256 * 1024), b""): hash_md5.update(chunk) return hash_md5.hexdigest() + + +def mock_env(**kwargs): + """Decorator to set environment variables for a test function.""" + return mock.patch.dict(os.environ, kwargs) diff --git a/tools/publish_doc.sh b/tools/publish_doc.sh deleted file mode 100755 index b46f620..0000000 --- a/tools/publish_doc.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -#To use the script, you must have ossutil in your system PATH and config the client tool to visit the "pai-sdk" OSS bucket. 
-# -#usage: -# -# publish_doc preview # publish preview documents -# publish_doc production # publish production documents -# - -set -e - -function cd_docs_dir() { - docs_dir="$(dirname "$0")/../docs" - cd "$docs_dir" || (echo "cd the workdir $(docs_dir) fail" && exit) -} - -function build_doc() { - nox -s doc -} - -function publish_preview_doc() { - build_doc - public_doc preview/doc/html - echo "please visit link https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/preview/doc/html/index.html to view the documents." -} - -function public_production_doc() { - build_doc - public_doc doc/html - echo "please visit link https://pai-sdk.oss-cn-shanghai.aliyuncs.com/pai/doc/html/index.html to view the documents." -} -# -function public_doc() { - if [ -z "$1" ]; then - echo "target_path not exists" && exit 1 - fi - echo "target_path is $1" - - ossutilmac64 cp build/html oss://pai-sdk/pai/"$1" --recursive -f -} - -release_type=${1:-preview} - -if [ $release_type == "preview" ]; then - publish_preview_doc -elif [ $release_type == "production" ]; then - public_production_doc -else - echo "unknown release_type: $release_type" && exit 1 -fi